Skip to content

Commit a42055e

Browse files
nxa22042 authored and davem330 committed
net/tls: Add support for async encryption of records for performance
In the current implementation, tls records are encrypted & transmitted serially. Till the time the previously submitted user data is encrypted, the implementation waits and on finish starts transmitting the record. This approach of encrypting one record at a time is inefficient when asynchronous crypto accelerators are used. For each record, there are overheads of interrupts, driver softIRQ scheduling etc. Also the crypto accelerator sits idle most of the time while an encrypted record's pages are handed over to the tcp stack for transmission. This patch enables encryption of multiple records in parallel when an async capable crypto accelerator is present in the system. This is achieved by allowing the user space application to send more data using sendmsg() even while previously issued data is being processed by the crypto accelerator. This requires returning the control back to the user space application after submitting the encryption request to the accelerator. This also means that zero-copy mode of encryption cannot be used with an async accelerator as we must be done with the user space application buffer before returning from sendmsg(). There can be multiple records in flight to/from the accelerator. Each of the records is represented by 'struct tls_rec'. This is used to store the memory pages for the record. After the records are encrypted, they are added in a linked list called tx_ready_list which contains encrypted tls records sorted as per tls sequence number. The records from tx_ready_list are transmitted using a newly introduced function called tls_tx_records(). The tx_ready_list is polled for any record ready to be transmitted in sendmsg(), sendpage() after initiating encryption of new tls records. This achieves parallel encryption and transmission of records when an async accelerator is present. There could be a situation when the crypto accelerator completes encryption later than the polling of tx_ready_list by sendmsg()/sendpage().
Therefore we need a deferred work context to be able to transmit records from tx_ready_list. The deferred work context gets scheduled if applications are not sending much data through the socket. If the applications issue sendmsg()/sendpage() in quick succession, then the scheduling of tx_work_handler gets cancelled as the tx_ready_list would be polled from application's context itself. This saves scheduling overhead of deferred work. The patch also brings some side benefit. We are able to get rid of the concept of CLOSED record. This is because the records once closed are either encrypted and then placed into tx_ready_list or if encryption fails, the socket error is set. This simplifies the kernel tls sendpath. However since tls_device.c is still using macros, accessory functions for CLOSED records have been retained. Signed-off-by: Vakul Garg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 06983aa commit a42055e

File tree

3 files changed

+522
-188
lines changed

3 files changed

+522
-188
lines changed

include/net/tls.h

Lines changed: 58 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
#include <linux/tcp.h>
4242
#include <net/tcp.h>
4343
#include <net/strparser.h>
44-
44+
#include <crypto/aead.h>
4545
#include <uapi/linux/tls.h>
4646

4747

@@ -93,24 +93,47 @@ enum {
9393
TLS_NUM_CONFIG,
9494
};
9595

96-
struct tls_sw_context_tx {
97-
struct crypto_aead *aead_send;
98-
struct crypto_wait async_wait;
99-
100-
char aad_space[TLS_AAD_SPACE_SIZE];
101-
102-
unsigned int sg_plaintext_size;
103-
int sg_plaintext_num_elem;
96+
/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
97+
* allocated or mapped for each TLS record. After encryption, the records are
98+
 * stored in a linked list.
99+
*/
100+
struct tls_rec {
101+
struct list_head list;
102+
int tx_flags;
104103
struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
105-
106-
unsigned int sg_encrypted_size;
107-
int sg_encrypted_num_elem;
108104
struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
109105

110106
/* AAD | sg_plaintext_data | sg_tag */
111107
struct scatterlist sg_aead_in[2];
112108
/* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
113109
struct scatterlist sg_aead_out[2];
110+
111+
unsigned int sg_plaintext_size;
112+
unsigned int sg_encrypted_size;
113+
int sg_plaintext_num_elem;
114+
int sg_encrypted_num_elem;
115+
116+
char aad_space[TLS_AAD_SPACE_SIZE];
117+
struct aead_request aead_req;
118+
u8 aead_req_ctx[];
119+
};
120+
121+
struct tx_work {
122+
struct delayed_work work;
123+
struct sock *sk;
124+
};
125+
126+
struct tls_sw_context_tx {
127+
struct crypto_aead *aead_send;
128+
struct crypto_wait async_wait;
129+
struct tx_work tx_work;
130+
struct tls_rec *open_rec;
131+
struct list_head tx_ready_list;
132+
atomic_t encrypt_pending;
133+
int async_notify;
134+
135+
#define BIT_TX_SCHEDULED 0
136+
unsigned long tx_bitmask;
114137
};
115138

116139
struct tls_sw_context_rx {
@@ -197,6 +220,8 @@ struct tls_context {
197220

198221
struct scatterlist *partially_sent_record;
199222
u16 partially_sent_offset;
223+
u64 tx_seq_number; /* Next TLS seqnum to be transmitted */
224+
200225
unsigned long flags;
201226
bool in_tcp_sendpages;
202227

@@ -261,6 +286,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
261286
void tls_device_sk_destruct(struct sock *sk);
262287
void tls_device_init(void);
263288
void tls_device_cleanup(void);
289+
int tls_tx_records(struct sock *sk, int flags);
264290

265291
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
266292
u32 seq, u64 *p_record_sn);
@@ -279,6 +305,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
279305
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
280306
struct scatterlist *sg, u16 first_offset,
281307
int flags);
308+
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
309+
int flags);
310+
282311
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
283312
int flags, long *timeo);
284313

@@ -312,6 +341,23 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
312341
return tls_ctx->pending_open_record_frags;
313342
}
314343

344+
static inline bool is_tx_ready(struct tls_context *tls_ctx,
345+
struct tls_sw_context_tx *ctx)
346+
{
347+
struct tls_rec *rec;
348+
u64 seq;
349+
350+
rec = list_first_entry(&ctx->tx_ready_list, struct tls_rec, list);
351+
if (!rec)
352+
return false;
353+
354+
seq = be64_to_cpup((const __be64 *)&rec->aad_space);
355+
if (seq == tls_ctx->tx_seq_number)
356+
return true;
357+
else
358+
return false;
359+
}
360+
315361
struct sk_buff *
316362
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
317363
struct sk_buff *skb);

net/tls/tls_main.c

Lines changed: 21 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,6 @@ int tls_push_sg(struct sock *sk,
141141
size = sg->length;
142142
}
143143

144-
clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
145144
ctx->in_tcp_sendpages = false;
146145
ctx->sk_write_space(sk);
147146

@@ -193,25 +192,36 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
193192
return rc;
194193
}
195194

196-
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
197-
int flags, long *timeo)
195+
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
196+
int flags)
198197
{
199198
struct scatterlist *sg;
200199
u16 offset;
201200

202-
if (!tls_is_partially_sent_record(ctx))
203-
return ctx->push_pending_record(sk, flags);
204-
205201
sg = ctx->partially_sent_record;
206202
offset = ctx->partially_sent_offset;
207203

208204
ctx->partially_sent_record = NULL;
209205
return tls_push_sg(sk, ctx, sg, offset, flags);
210206
}
211207

208+
int tls_push_pending_closed_record(struct sock *sk,
209+
struct tls_context *tls_ctx,
210+
int flags, long *timeo)
211+
{
212+
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
213+
214+
if (tls_is_partially_sent_record(tls_ctx) ||
215+
!list_empty(&ctx->tx_ready_list))
216+
return tls_tx_records(sk, flags);
217+
else
218+
return tls_ctx->push_pending_record(sk, flags);
219+
}
220+
212221
static void tls_write_space(struct sock *sk)
213222
{
214223
struct tls_context *ctx = tls_get_ctx(sk);
224+
struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
215225

216226
/* If in_tcp_sendpages call lower protocol write space handler
217227
* to ensure we wake up any waiting operations there. For example
@@ -222,20 +232,11 @@ static void tls_write_space(struct sock *sk)
222232
return;
223233
}
224234

225-
if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
226-
gfp_t sk_allocation = sk->sk_allocation;
227-
int rc;
228-
long timeo = 0;
229-
230-
sk->sk_allocation = GFP_ATOMIC;
231-
rc = tls_push_pending_closed_record(sk, ctx,
232-
MSG_DONTWAIT |
233-
MSG_NOSIGNAL,
234-
&timeo);
235-
sk->sk_allocation = sk_allocation;
236-
237-
if (rc < 0)
238-
return;
235+
/* Schedule the transmission if tx list is ready */
236+
if (is_tx_ready(ctx, tx_ctx) && !sk->sk_write_pending) {
237+
/* Schedule the transmission */
238+
if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
239+
schedule_delayed_work(&tx_ctx->tx_work.work, 0);
239240
}
240241

241242
ctx->sk_write_space(sk);
@@ -270,19 +271,6 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
270271
if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
271272
tls_handle_open_record(sk, 0);
272273

273-
if (ctx->partially_sent_record) {
274-
struct scatterlist *sg = ctx->partially_sent_record;
275-
276-
while (1) {
277-
put_page(sg_page(sg));
278-
sk_mem_uncharge(sk, sg->length);
279-
280-
if (sg_is_last(sg))
281-
break;
282-
sg++;
283-
}
284-
}
285-
286274
/* We need these for tls_sw_fallback handling of other packets */
287275
if (ctx->tx_conf == TLS_SW) {
288276
kfree(ctx->tx.rec_seq);

0 commit comments

Comments
 (0)