Skip to content

Commit c9ad5a6

Browse files
committed
Merge branch 'af_iucv-big-bufs'
Ursula Braun says: ==================== s390: af_iucv patches here are improvements for af_iucv relaxing the pressure to allocate big contiguous kernel buffers. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 818d49a + a006353 commit c9ad5a6

File tree

1 file changed

+122
-101
lines changed

1 file changed

+122
-101
lines changed

net/iucv/af_iucv.c

Lines changed: 122 additions & 101 deletions
Original file line number | Diff line number | Diff line change
@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
10331033
{
10341034
struct sock *sk = sock->sk;
10351035
struct iucv_sock *iucv = iucv_sk(sk);
1036+
size_t headroom, linear;
10361037
struct sk_buff *skb;
10371038
struct iucv_message txmsg = {0};
10381039
struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
11101111
* this is fine for SOCK_SEQPACKET (unless we want to support
11111112
* segmented records using the MSG_EOR flag), but
11121113
* for SOCK_STREAM we might want to improve it in future */
1113-
if (iucv->transport == AF_IUCV_TRANS_HIPER)
1114-
skb = sock_alloc_send_skb(sk,
1115-
len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1116-
noblock, &err);
1117-
else
1118-
skb = sock_alloc_send_skb(sk, len, noblock, &err);
1114+
headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
1115+
? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
1116+
if (headroom + len < PAGE_SIZE) {
1117+
linear = len;
1118+
} else {
1119+
/* In nonlinear "classic" iucv skb,
1120+
* reserve space for iucv_array
1121+
*/
1122+
if (iucv->transport != AF_IUCV_TRANS_HIPER)
1123+
headroom += sizeof(struct iucv_array) *
1124+
(MAX_SKB_FRAGS + 1);
1125+
linear = PAGE_SIZE - headroom;
1126+
}
1127+
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1128+
noblock, &err, 0);
11191129
if (!skb)
11201130
goto out;
1121-
if (iucv->transport == AF_IUCV_TRANS_HIPER)
1122-
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1123-
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1124-
err = -EFAULT;
1131+
if (headroom)
1132+
skb_reserve(skb, headroom);
1133+
skb_put(skb, linear);
1134+
skb->len = len;
1135+
skb->data_len = len - linear;
1136+
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1137+
if (err)
11251138
goto fail;
1126-
}
11271139

11281140
/* wait if outstanding messages for iucv path has reached */
11291141
timeo = sock_sndtimeo(sk, noblock);
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
11481160
atomic_dec(&iucv->msg_sent);
11491161
goto fail;
11501162
}
1151-
goto release;
1152-
}
1153-
skb_queue_tail(&iucv->send_skb_q, skb);
1154-
1155-
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1156-
&& skb->len <= 7) {
1157-
err = iucv_send_iprm(iucv->path, &txmsg, skb);
1163+
} else { /* Classic VM IUCV transport */
1164+
skb_queue_tail(&iucv->send_skb_q, skb);
1165+
1166+
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1167+
skb->len <= 7) {
1168+
err = iucv_send_iprm(iucv->path, &txmsg, skb);
1169+
1170+
/* on success: there is no message_complete callback */
1171+
/* for an IPRMDATA msg; remove skb from send queue */
1172+
if (err == 0) {
1173+
skb_unlink(skb, &iucv->send_skb_q);
1174+
kfree_skb(skb);
1175+
}
11581176

1159-
/* on success: there is no message_complete callback
1160-
* for an IPRMDATA msg; remove skb from send queue */
1161-
if (err == 0) {
1162-
skb_unlink(skb, &iucv->send_skb_q);
1163-
kfree_skb(skb);
1177+
/* this error should never happen since the */
1178+
/* IUCV_IPRMDATA path flag is set... sever path */
1179+
if (err == 0x15) {
1180+
pr_iucv->path_sever(iucv->path, NULL);
1181+
skb_unlink(skb, &iucv->send_skb_q);
1182+
err = -EPIPE;
1183+
goto fail;
1184+
}
1185+
} else if (skb_is_nonlinear(skb)) {
1186+
struct iucv_array *iba = (struct iucv_array *)skb->head;
1187+
int i;
1188+
1189+
/* skip iucv_array lying in the headroom */
1190+
iba[0].address = (u32)(addr_t)skb->data;
1191+
iba[0].length = (u32)skb_headlen(skb);
1192+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1193+
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1194+
1195+
iba[i + 1].address =
1196+
(u32)(addr_t)skb_frag_address(frag);
1197+
iba[i + 1].length = (u32)skb_frag_size(frag);
1198+
}
1199+
err = pr_iucv->message_send(iucv->path, &txmsg,
1200+
IUCV_IPBUFLST, 0,
1201+
(void *)iba, skb->len);
1202+
} else { /* non-IPRM Linear skb */
1203+
err = pr_iucv->message_send(iucv->path, &txmsg,
1204+
0, 0, (void *)skb->data, skb->len);
11641205
}
1165-
1166-
/* this error should never happen since the
1167-
* IUCV_IPRMDATA path flag is set... sever path */
1168-
if (err == 0x15) {
1169-
pr_iucv->path_sever(iucv->path, NULL);
1206+
if (err) {
1207+
if (err == 3) {
1208+
user_id[8] = 0;
1209+
memcpy(user_id, iucv->dst_user_id, 8);
1210+
appl_id[8] = 0;
1211+
memcpy(appl_id, iucv->dst_name, 8);
1212+
pr_err(
1213+
"Application %s on z/VM guest %s exceeds message limit\n",
1214+
appl_id, user_id);
1215+
err = -EAGAIN;
1216+
} else {
1217+
err = -EPIPE;
1218+
}
11701219
skb_unlink(skb, &iucv->send_skb_q);
1171-
err = -EPIPE;
11721220
goto fail;
11731221
}
1174-
} else
1175-
err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1176-
(void *) skb->data, skb->len);
1177-
if (err) {
1178-
if (err == 3) {
1179-
user_id[8] = 0;
1180-
memcpy(user_id, iucv->dst_user_id, 8);
1181-
appl_id[8] = 0;
1182-
memcpy(appl_id, iucv->dst_name, 8);
1183-
pr_err("Application %s on z/VM guest %s"
1184-
" exceeds message limit\n",
1185-
appl_id, user_id);
1186-
err = -EAGAIN;
1187-
} else
1188-
err = -EPIPE;
1189-
skb_unlink(skb, &iucv->send_skb_q);
1190-
goto fail;
11911222
}
11921223

1193-
release:
11941224
release_sock(sk);
11951225
return len;
11961226

@@ -1201,42 +1231,32 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
12011231
return err;
12021232
}
12031233

1204-
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
1205-
*
1206-
* Locking: must be called with message_q.lock held
1207-
*/
1208-
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1234+
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
12091235
{
1210-
int dataleft, size, copied = 0;
1211-
struct sk_buff *nskb;
1212-
1213-
dataleft = len;
1214-
while (dataleft) {
1215-
if (dataleft >= sk->sk_rcvbuf / 4)
1216-
size = sk->sk_rcvbuf / 4;
1217-
else
1218-
size = dataleft;
1219-
1220-
nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1221-
if (!nskb)
1222-
return -ENOMEM;
1223-
1224-
/* copy target class to control buffer of new skb */
1225-
IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1226-
1227-
/* copy data fragment */
1228-
memcpy(nskb->data, skb->data + copied, size);
1229-
copied += size;
1230-
dataleft -= size;
1231-
1232-
skb_reset_transport_header(nskb);
1233-
skb_reset_network_header(nskb);
1234-
nskb->len = size;
1236+
size_t headroom, linear;
1237+
struct sk_buff *skb;
1238+
int err;
12351239

1236-
skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
1240+
if (len < PAGE_SIZE) {
1241+
headroom = 0;
1242+
linear = len;
1243+
} else {
1244+
headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
1245+
linear = PAGE_SIZE - headroom;
1246+
}
1247+
skb = alloc_skb_with_frags(headroom + linear, len - linear,
1248+
0, &err, GFP_ATOMIC | GFP_DMA);
1249+
WARN_ONCE(!skb,
1250+
"alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1251+
len, err);
1252+
if (skb) {
1253+
if (headroom)
1254+
skb_reserve(skb, headroom);
1255+
skb_put(skb, linear);
1256+
skb->len = len;
1257+
skb->data_len = len - linear;
12371258
}
1238-
1239-
return 0;
1259+
return skb;
12401260
}
12411261

12421262
/* iucv_process_message() - Receive a single outstanding IUCV message
@@ -1263,31 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
12631283
skb->len = 0;
12641284
}
12651285
} else {
1266-
rc = pr_iucv->message_receive(path, msg,
1286+
if (skb_is_nonlinear(skb)) {
1287+
struct iucv_array *iba = (struct iucv_array *)skb->head;
1288+
int i;
1289+
1290+
iba[0].address = (u32)(addr_t)skb->data;
1291+
iba[0].length = (u32)skb_headlen(skb);
1292+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1293+
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1294+
1295+
iba[i + 1].address =
1296+
(u32)(addr_t)skb_frag_address(frag);
1297+
iba[i + 1].length = (u32)skb_frag_size(frag);
1298+
}
1299+
rc = pr_iucv->message_receive(path, msg,
1300+
IUCV_IPBUFLST,
1301+
(void *)iba, len, NULL);
1302+
} else {
1303+
rc = pr_iucv->message_receive(path, msg,
12671304
msg->flags & IUCV_IPRMDATA,
12681305
skb->data, len, NULL);
1306+
}
12691307
if (rc) {
12701308
kfree_skb(skb);
12711309
return;
12721310
}
1273-
/* we need to fragment iucv messages for SOCK_STREAM only;
1274-
* for SOCK_SEQPACKET, it is only relevant if we support
1275-
* record segmentation using MSG_EOR (see also recvmsg()) */
1276-
if (sk->sk_type == SOCK_STREAM &&
1277-
skb->truesize >= sk->sk_rcvbuf / 4) {
1278-
rc = iucv_fragment_skb(sk, skb, len);
1279-
kfree_skb(skb);
1280-
skb = NULL;
1281-
if (rc) {
1282-
pr_iucv->path_sever(path, NULL);
1283-
return;
1284-
}
1285-
skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1286-
} else {
1287-
skb_reset_transport_header(skb);
1288-
skb_reset_network_header(skb);
1289-
skb->len = len;
1290-
}
1311+
WARN_ON_ONCE(skb->len != len);
12911312
}
12921313

12931314
IUCV_SKB_CB(skb)->offset = 0;
@@ -1306,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
13061327
struct sock_msg_q *p, *n;
13071328

13081329
list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1309-
skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
1330+
skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
13101331
if (!skb)
13111332
break;
13121333
iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1801,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
18011822
if (len > sk->sk_rcvbuf)
18021823
goto save_message;
18031824

1804-
skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1825+
skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
18051826
if (!skb)
18061827
goto save_message;
18071828

0 commit comments

Comments (0)