Skip to content

Commit a006353

Browse files
Eugene Crosser authored and davem330 committed
af_iucv: use paged SKBs for big inbound messages
When an inbound message is bigger than a page, allocate a paged SKB, and subsequently use IUCV receive primitive with IPBUFLST flag. This relaxes the pressure to allocate big contiguous kernel buffers. Signed-off-by: Eugene Crosser <[email protected]> Signed-off-by: Ursula Braun <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 291759a commit a006353

File tree

1 file changed

+50
-6
lines changed

1 file changed

+50
-6
lines changed

net/iucv/af_iucv.c

Lines changed: 50 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1231,6 +1231,34 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
12311231
return err;
12321232
}
12331233

1234+
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
1235+
{
1236+
size_t headroom, linear;
1237+
struct sk_buff *skb;
1238+
int err;
1239+
1240+
if (len < PAGE_SIZE) {
1241+
headroom = 0;
1242+
linear = len;
1243+
} else {
1244+
headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
1245+
linear = PAGE_SIZE - headroom;
1246+
}
1247+
skb = alloc_skb_with_frags(headroom + linear, len - linear,
1248+
0, &err, GFP_ATOMIC | GFP_DMA);
1249+
WARN_ONCE(!skb,
1250+
"alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1251+
len, err);
1252+
if (skb) {
1253+
if (headroom)
1254+
skb_reserve(skb, headroom);
1255+
skb_put(skb, linear);
1256+
skb->len = len;
1257+
skb->data_len = len - linear;
1258+
}
1259+
return skb;
1260+
}
1261+
12341262
/* iucv_process_message() - Receive a single outstanding IUCV message
12351263
*
12361264
* Locking: must be called with message_q.lock held
@@ -1255,16 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
12551283
skb->len = 0;
12561284
}
12571285
} else {
1258-
rc = pr_iucv->message_receive(path, msg,
1286+
if (skb_is_nonlinear(skb)) {
1287+
struct iucv_array *iba = (struct iucv_array *)skb->head;
1288+
int i;
1289+
1290+
iba[0].address = (u32)(addr_t)skb->data;
1291+
iba[0].length = (u32)skb_headlen(skb);
1292+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1293+
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1294+
1295+
iba[i + 1].address =
1296+
(u32)(addr_t)skb_frag_address(frag);
1297+
iba[i + 1].length = (u32)skb_frag_size(frag);
1298+
}
1299+
rc = pr_iucv->message_receive(path, msg,
1300+
IUCV_IPBUFLST,
1301+
(void *)iba, len, NULL);
1302+
} else {
1303+
rc = pr_iucv->message_receive(path, msg,
12591304
msg->flags & IUCV_IPRMDATA,
12601305
skb->data, len, NULL);
1306+
}
12611307
if (rc) {
12621308
kfree_skb(skb);
12631309
return;
12641310
}
1265-
skb_reset_transport_header(skb);
1266-
skb_reset_network_header(skb);
1267-
skb->len = len;
1311+
WARN_ON_ONCE(skb->len != len);
12681312
}
12691313

12701314
IUCV_SKB_CB(skb)->offset = 0;
@@ -1283,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
12831327
struct sock_msg_q *p, *n;
12841328

12851329
list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1286-
skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
1330+
skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
12871331
if (!skb)
12881332
break;
12891333
iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1778,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
17781822
if (len > sk->sk_rcvbuf)
17791823
goto save_message;
17801824

1781-
skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1825+
skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
17821826
if (!skb)
17831827
goto save_message;
17841828

0 commit comments

Comments
 (0)