Commit cbbdee9

kuba-moo authored and davem330 committed
tls: rx: async: don't put async zc on the list
The "zero-copy" path in SW TLS will engage either for no skbs or for all but last. If the recvmsg parameters are right and the socket can do ZC we'll ZC until the iterator can't fit a full record at which point we'll decrypt one more record and copy over the necessary bits to fill up the request. The only reason we hold onto the ZC skbs which went thru the async path until the end of recvmsg() is to count bytes. We need an accurate count of zc'ed bytes so that we can calculate how much of the non-zc'd data to copy. To allow freeing input skbs on the ZC path count only how much of the list we'll need to consume. Signed-off-by: Jakub Kicinski <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent c618db2 · commit cbbdee9

File tree: 1 file changed (+19, -21 lines)


net/tls/tls_sw.c

Lines changed: 19 additions & 21 deletions
@@ -1675,7 +1675,6 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
                            u8 *control,
                            size_t skip,
                            size_t len,
-                           bool zc,
                            bool is_peek)
 {
         struct sk_buff *skb = skb_peek(&ctx->rx_list);
@@ -1709,12 +1708,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
                 if (err <= 0)
                         goto out;
 
-                if (!zc || (rxm->full_len - skip) > len) {
-                        err = skb_copy_datagram_msg(skb, rxm->offset + skip,
-                                                    msg, chunk);
-                        if (err < 0)
-                                goto out;
-                }
+                err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+                                            msg, chunk);
+                if (err < 0)
+                        goto out;
 
                 len = len - chunk;
                 copied = copied + chunk;
@@ -1824,9 +1821,9 @@ int tls_sw_recvmsg(struct sock *sk,
         struct tls_context *tls_ctx = tls_get_ctx(sk);
         struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
         struct tls_prot_info *prot = &tls_ctx->prot_info;
+        ssize_t decrypted = 0, async_copy_bytes = 0;
         struct sk_psock *psock;
         unsigned char control = 0;
-        ssize_t decrypted = 0;
         size_t flushed_at = 0;
         struct strp_msg *rxm;
         struct tls_msg *tlm;
@@ -1855,7 +1852,7 @@ int tls_sw_recvmsg(struct sock *sk,
                 goto end;
 
         /* Process pending decrypted records. It must be non-zero-copy */
-        err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
+        err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
         if (err < 0)
                 goto end;
 
@@ -1939,19 +1936,20 @@ int tls_sw_recvmsg(struct sock *sk,
                 chunk = rxm->full_len;
                 tls_rx_rec_done(ctx);
 
-                if (async) {
-                        /* TLS 1.2-only, to_decrypt must be text length */
-                        chunk = min_t(int, to_decrypt, len);
-put_on_rx_list:
-                        decrypted += chunk;
-                        len -= chunk;
-                        __skb_queue_tail(&ctx->rx_list, skb);
-                        continue;
-                }
-
                 if (!darg.zc) {
                         bool partially_consumed = chunk > len;
 
+                        if (async) {
+                                /* TLS 1.2-only, to_decrypt must be text len */
+                                chunk = min_t(int, to_decrypt, len);
+                                async_copy_bytes += chunk;
+put_on_rx_list:
+                                decrypted += chunk;
+                                len -= chunk;
+                                __skb_queue_tail(&ctx->rx_list, skb);
+                                continue;
+                        }
+
                         if (bpf_strp_enabled) {
                                 err = sk_psock_tls_strp_read(psock, skb);
                                 if (err != __SK_PASS) {
@@ -2018,10 +2016,10 @@ int tls_sw_recvmsg(struct sock *sk,
                 /* Drain records from the rx_list & copy if required */
                 if (is_peek || is_kvec)
                         err = process_rx_list(ctx, msg, &control, copied,
-                                              decrypted, false, is_peek);
+                                              decrypted, is_peek);
                 else
                         err = process_rx_list(ctx, msg, &control, 0,
-                                              decrypted, true, is_peek);
+                                              async_copy_bytes, is_peek);
                 decrypted = max(err, 0);
         }
 