Skip to content

Commit 4ff3dfc

Browse files
author
Paolo Abeni
committed
Merge branch 'splice-net-handle-msg_splice_pages-in-chelsio-tls'
David Howells says: ==================== splice, net: Handle MSG_SPLICE_PAGES in Chelsio-TLS Here are patches to make Chelsio-TLS handle the MSG_SPLICE_PAGES internal sendmsg flag. MSG_SPLICE_PAGES is an internal hint that tells the protocol that it should splice the pages supplied if it can. Its sendpage implementation is then turned into a wrapper around that. ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Paolo Abeni <[email protected]>
2 parents 735c9ee + 26acc98 commit 4ff3dfc

File tree

1 file changed

+18
-103
lines changed
  • drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c

1 file changed

+18
-103
lines changed

drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c

Lines changed: 18 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -1092,7 +1092,17 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
10921092
if (copy > size)
10931093
copy = size;
10941094

1095-
if (skb_tailroom(skb) > 0) {
1095+
if (msg->msg_flags & MSG_SPLICE_PAGES) {
1096+
err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1097+
sk->sk_allocation);
1098+
if (err < 0) {
1099+
if (err == -EMSGSIZE)
1100+
goto new_buf;
1101+
goto do_fault;
1102+
}
1103+
copy = err;
1104+
sk_wmem_queued_add(sk, copy);
1105+
} else if (skb_tailroom(skb) > 0) {
10961106
copy = min(copy, skb_tailroom(skb));
10971107
if (is_tls_tx(csk))
10981108
copy = min_t(int, copy, csk->tlshws.txleft);
@@ -1230,110 +1240,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
12301240
int chtls_sendpage(struct sock *sk, struct page *page,
12311241
int offset, size_t size, int flags)
12321242
{
1233-
struct chtls_sock *csk;
1234-
struct chtls_dev *cdev;
1235-
int mss, err, copied;
1236-
struct tcp_sock *tp;
1237-
long timeo;
1238-
1239-
tp = tcp_sk(sk);
1240-
copied = 0;
1241-
csk = rcu_dereference_sk_user_data(sk);
1242-
cdev = csk->cdev;
1243-
lock_sock(sk);
1244-
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1245-
1246-
err = sk_stream_wait_connect(sk, &timeo);
1247-
if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1248-
err != 0)
1249-
goto out_err;
1250-
1251-
mss = csk->mss;
1252-
csk_set_flag(csk, CSK_TX_MORE_DATA);
1253-
1254-
while (size > 0) {
1255-
struct sk_buff *skb = skb_peek_tail(&csk->txq);
1256-
int copy, i;
1243+
struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
1244+
struct bio_vec bvec;
12571245

1258-
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
1259-
(copy = mss - skb->len) <= 0) {
1260-
new_buf:
1261-
if (!csk_mem_free(cdev, sk))
1262-
goto wait_for_sndbuf;
1263-
1264-
if (is_tls_tx(csk)) {
1265-
skb = get_record_skb(sk,
1266-
select_size(sk, size,
1267-
flags,
1268-
TX_TLSHDR_LEN),
1269-
true);
1270-
} else {
1271-
skb = get_tx_skb(sk, 0);
1272-
}
1273-
if (!skb)
1274-
goto wait_for_memory;
1275-
copy = mss;
1276-
}
1277-
if (copy > size)
1278-
copy = size;
1279-
1280-
i = skb_shinfo(skb)->nr_frags;
1281-
if (skb_can_coalesce(skb, i, page, offset)) {
1282-
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1283-
} else if (i < MAX_SKB_FRAGS) {
1284-
get_page(page);
1285-
skb_fill_page_desc(skb, i, page, offset, copy);
1286-
} else {
1287-
tx_skb_finalize(skb);
1288-
push_frames_if_head(sk);
1289-
goto new_buf;
1290-
}
1246+
if (flags & MSG_SENDPAGE_NOTLAST)
1247+
msg.msg_flags |= MSG_MORE;
12911248

1292-
skb->len += copy;
1293-
if (skb->len == mss)
1294-
tx_skb_finalize(skb);
1295-
skb->data_len += copy;
1296-
skb->truesize += copy;
1297-
sk->sk_wmem_queued += copy;
1298-
tp->write_seq += copy;
1299-
copied += copy;
1300-
offset += copy;
1301-
size -= copy;
1302-
1303-
if (corked(tp, flags) &&
1304-
(sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
1305-
ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
1306-
1307-
if (!size)
1308-
break;
1309-
1310-
if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
1311-
push_frames_if_head(sk);
1312-
continue;
1313-
wait_for_sndbuf:
1314-
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1315-
wait_for_memory:
1316-
err = csk_wait_memory(cdev, sk, &timeo);
1317-
if (err)
1318-
goto do_error;
1319-
}
1320-
out:
1321-
csk_reset_flag(csk, CSK_TX_MORE_DATA);
1322-
if (copied)
1323-
chtls_tcp_push(sk, flags);
1324-
done:
1325-
release_sock(sk);
1326-
return copied;
1327-
1328-
do_error:
1329-
if (copied)
1330-
goto out;
1331-
1332-
out_err:
1333-
if (csk_conn_inline(csk))
1334-
csk_reset_flag(csk, CSK_TX_MORE_DATA);
1335-
copied = sk_stream_error(sk, flags, err);
1336-
goto done;
1249+
bvec_set_page(&bvec, page, size, offset);
1250+
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
1251+
return chtls_sendmsg(sk, &msg, size);
13371252
}
13381253

13391254
static void chtls_select_window(struct sock *sk)

0 commit comments

Comments
 (0)