Skip to content

Commit e537439

Browse files
Eugene Crosser authored and davem330 committed
af_iucv: use paged SKBs for big outbound messages
When an outbound message is bigger than a page, allocate and fill a paged SKB, and subsequently use the IUCV send primitive with the IPBUFLST flag. This relaxes the pressure to allocate big contiguous kernel buffers.

Signed-off-by: Eugene Crosser <[email protected]>
Signed-off-by: Ursula Braun <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 818d49a commit e537439

File tree

1 file changed

+77
-47
lines changed

1 file changed

+77
-47
lines changed

net/iucv/af_iucv.c

Lines changed: 77 additions & 47 deletions
Original file line number | Diff line number | Diff line change
@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
10331033
{
10341034
struct sock *sk = sock->sk;
10351035
struct iucv_sock *iucv = iucv_sk(sk);
1036+
size_t headroom, linear;
10361037
struct sk_buff *skb;
10371038
struct iucv_message txmsg = {0};
10381039
struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
11101111
* this is fine for SOCK_SEQPACKET (unless we want to support
11111112
* segmented records using the MSG_EOR flag), but
11121113
* for SOCK_STREAM we might want to improve it in future */
1113-
if (iucv->transport == AF_IUCV_TRANS_HIPER)
1114-
skb = sock_alloc_send_skb(sk,
1115-
len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1116-
noblock, &err);
1117-
else
1118-
skb = sock_alloc_send_skb(sk, len, noblock, &err);
1114+
headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
1115+
? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
1116+
if (headroom + len < PAGE_SIZE) {
1117+
linear = len;
1118+
} else {
1119+
/* In nonlinear "classic" iucv skb,
1120+
* reserve space for iucv_array
1121+
*/
1122+
if (iucv->transport != AF_IUCV_TRANS_HIPER)
1123+
headroom += sizeof(struct iucv_array) *
1124+
(MAX_SKB_FRAGS + 1);
1125+
linear = PAGE_SIZE - headroom;
1126+
}
1127+
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1128+
noblock, &err, 0);
11191129
if (!skb)
11201130
goto out;
1121-
if (iucv->transport == AF_IUCV_TRANS_HIPER)
1122-
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1123-
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1124-
err = -EFAULT;
1131+
if (headroom)
1132+
skb_reserve(skb, headroom);
1133+
skb_put(skb, linear);
1134+
skb->len = len;
1135+
skb->data_len = len - linear;
1136+
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1137+
if (err)
11251138
goto fail;
1126-
}
11271139

11281140
/* wait if outstanding messages for iucv path has reached */
11291141
timeo = sock_sndtimeo(sk, noblock);
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
11481160
atomic_dec(&iucv->msg_sent);
11491161
goto fail;
11501162
}
1151-
goto release;
1152-
}
1153-
skb_queue_tail(&iucv->send_skb_q, skb);
1154-
1155-
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1156-
&& skb->len <= 7) {
1157-
err = iucv_send_iprm(iucv->path, &txmsg, skb);
1163+
} else { /* Classic VM IUCV transport */
1164+
skb_queue_tail(&iucv->send_skb_q, skb);
1165+
1166+
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1167+
skb->len <= 7) {
1168+
err = iucv_send_iprm(iucv->path, &txmsg, skb);
1169+
1170+
/* on success: there is no message_complete callback */
1171+
/* for an IPRMDATA msg; remove skb from send queue */
1172+
if (err == 0) {
1173+
skb_unlink(skb, &iucv->send_skb_q);
1174+
kfree_skb(skb);
1175+
}
11581176

1159-
/* on success: there is no message_complete callback
1160-
* for an IPRMDATA msg; remove skb from send queue */
1161-
if (err == 0) {
1162-
skb_unlink(skb, &iucv->send_skb_q);
1163-
kfree_skb(skb);
1177+
/* this error should never happen since the */
1178+
/* IUCV_IPRMDATA path flag is set... sever path */
1179+
if (err == 0x15) {
1180+
pr_iucv->path_sever(iucv->path, NULL);
1181+
skb_unlink(skb, &iucv->send_skb_q);
1182+
err = -EPIPE;
1183+
goto fail;
1184+
}
1185+
} else if (skb_is_nonlinear(skb)) {
1186+
struct iucv_array *iba = (struct iucv_array *)skb->head;
1187+
int i;
1188+
1189+
/* skip iucv_array lying in the headroom */
1190+
iba[0].address = (u32)(addr_t)skb->data;
1191+
iba[0].length = (u32)skb_headlen(skb);
1192+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1193+
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1194+
1195+
iba[i + 1].address =
1196+
(u32)(addr_t)skb_frag_address(frag);
1197+
iba[i + 1].length = (u32)skb_frag_size(frag);
1198+
}
1199+
err = pr_iucv->message_send(iucv->path, &txmsg,
1200+
IUCV_IPBUFLST, 0,
1201+
(void *)iba, skb->len);
1202+
} else { /* non-IPRM Linear skb */
1203+
err = pr_iucv->message_send(iucv->path, &txmsg,
1204+
0, 0, (void *)skb->data, skb->len);
11641205
}
1165-
1166-
/* this error should never happen since the
1167-
* IUCV_IPRMDATA path flag is set... sever path */
1168-
if (err == 0x15) {
1169-
pr_iucv->path_sever(iucv->path, NULL);
1206+
if (err) {
1207+
if (err == 3) {
1208+
user_id[8] = 0;
1209+
memcpy(user_id, iucv->dst_user_id, 8);
1210+
appl_id[8] = 0;
1211+
memcpy(appl_id, iucv->dst_name, 8);
1212+
pr_err(
1213+
"Application %s on z/VM guest %s exceeds message limit\n",
1214+
appl_id, user_id);
1215+
err = -EAGAIN;
1216+
} else {
1217+
err = -EPIPE;
1218+
}
11701219
skb_unlink(skb, &iucv->send_skb_q);
1171-
err = -EPIPE;
11721220
goto fail;
11731221
}
1174-
} else
1175-
err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1176-
(void *) skb->data, skb->len);
1177-
if (err) {
1178-
if (err == 3) {
1179-
user_id[8] = 0;
1180-
memcpy(user_id, iucv->dst_user_id, 8);
1181-
appl_id[8] = 0;
1182-
memcpy(appl_id, iucv->dst_name, 8);
1183-
pr_err("Application %s on z/VM guest %s"
1184-
" exceeds message limit\n",
1185-
appl_id, user_id);
1186-
err = -EAGAIN;
1187-
} else
1188-
err = -EPIPE;
1189-
skb_unlink(skb, &iucv->send_skb_q);
1190-
goto fail;
11911222
}
11921223

1193-
release:
11941224
release_sock(sk);
11951225
return len;
11961226

0 commit comments

Comments
 (0)