
Commit 4799ac8

Boris Pismenny authored and davem330 committed
tls: Add rx inline crypto offload
This patch completes the generic infrastructure to offload TLS crypto to a network device. It enables the kernel to skip decryption and authentication of some skbs marked as decrypted by the NIC. In the fast path, all packets received are decrypted by the NIC and the performance is comparable to plain TCP.

This infrastructure doesn't require a TCP offload engine. Instead, the NIC only decrypts packets that contain the expected TCP sequence number. Out-of-order TCP packets are provided unmodified. As a result, in the worst case a received TLS record consists of both plaintext and ciphertext packets. These partially decrypted records must be re-encrypted, only to be decrypted again.

The notable differences between SW KTLS Rx and this offload are as follows:

1. Partial decryption - Software must handle the case of a TLS record that was only partially decrypted by HW. This can happen due to packet reordering.

2. Resynchronization - tls_read_size calls the device driver to resynchronize HW after HW lost track of TLS record framing in the TCP stream.

Signed-off-by: Boris Pismenny <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent b190a58 commit 4799ac8
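
For orientation before the diff: the partial-decryption case described in the commit message is decided per record on the receive path. Below is a simplified sketch of that decision, not the code added by this commit; it assumes the skb->decrypted bit set by the NIC driver (added elsewhere in this series) and a re-encryption helper along the lines of tls_device_reencrypt() in net/tls/tls_device.c. The function name is invented for illustration.

/* Hypothetical sketch, not the code added by this commit */
static int rx_record_fully_decrypted(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *skb_iter;
        bool all_plain = skb->decrypted;        /* set by the NIC driver */
        bool all_cipher = !skb->decrypted;

        /* A record can span several skbs; reordering may leave it mixed */
        skb_walk_frags(skb, skb_iter) {
                all_plain &= skb_iter->decrypted;
                all_cipher &= !skb_iter->decrypted;
        }

        if (all_plain)
                return 1;       /* skip SW decryption and authentication */
        if (all_cipher)
                return 0;       /* fall through to SW KTLS Rx */

        /* mixed record: re-encrypt the plaintext parts, then decrypt in SW */
        return tls_device_reencrypt(sk, skb);
}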

File tree

5 files changed (+355, -43 lines)

include/net/tls.h

Lines changed: 59 additions & 4 deletions
@@ -83,6 +83,16 @@ struct tls_device {
 	void (*unhash)(struct tls_device *device, struct sock *sk);
 };
 
+enum {
+	TLS_BASE,
+	TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+	TLS_HW,
+#endif
+	TLS_HW_RECORD,
+	TLS_NUM_CONFIG,
+};
+
 struct tls_sw_context_tx {
 	struct crypto_aead *aead_send;
 	struct crypto_wait async_wait;
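
The new enum names the per-direction configurations a TLS socket can be in (plain, SW kTLS, HW offload, HW record offload). For context, hedged as a sketch rather than code from this diff: net/tls/tls_main.c uses these values to pick the socket's proto ops from a table indexed by TX and RX configuration (the real table carries an extra IPv4/IPv6 dimension), roughly:

static struct proto tls_prots[TLS_NUM_CONFIG][TLS_NUM_CONFIG];

/* Hypothetical helper showing the indexing scheme, assuming the
 * tx_conf/rx_conf fields of struct tls_context.
 */
static void tls_update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
        /* e.g. tx_conf == TLS_HW, rx_conf == TLS_SW after setsockopt() */
        sk->sk_prot = &tls_prots[ctx->tx_conf][ctx->rx_conf];
}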
@@ -197,6 +207,7 @@ struct tls_context {
 	int (*push_pending_record)(struct sock *sk, int flags);
 
 	void (*sk_write_space)(struct sock *sk);
+	void (*sk_destruct)(struct sock *sk);
 	void (*sk_proto_close)(struct sock *sk, long timeout);
 
 	int (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +220,27 @@ struct tls_context {
 	void (*unhash)(struct sock *sk);
 };
 
+struct tls_offload_context_rx {
+	/* sw must be the first member of tls_offload_context_rx */
+	struct tls_sw_context_rx sw;
+	atomic64_t resync_req;
+	u8 driver_state[];
+	/* The TLS layer reserves room for driver specific state
+	 * Currently the belief is that there is not enough
+	 * driver specific state to justify another layer of indirection
+	 */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
+	(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+	 TLS_DRIVER_STATE_SIZE)
+
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 int tls_sk_query(struct sock *sk, int optname, char __user *optval,
 		 int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
 		  unsigned int optlen);
 
-
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
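
driver_state[] gives the NIC driver scratch space inside the RX offload context, so per-connection driver state needs no separate allocation (the comment in the struct explains why no further indirection is added). A hypothetical driver-side accessor, with all names below invented for illustration:

struct mydrv_rx_state {                 /* hypothetical driver state */
        u32 hw_flow_id;
        u32 next_expected_tcp_seq;
};

static inline struct mydrv_rx_state *
mydrv_get_rx_state(struct tls_offload_context_rx *rx_ctx)
{
        /* must fit in the TLS_DRIVER_STATE_SIZE bytes reserved by the core */
        BUILD_BUG_ON(sizeof(struct mydrv_rx_state) > TLS_DRIVER_STATE_SIZE);
        return (struct mydrv_rx_state *)rx_ctx->driver_state;
}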
@@ -290,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 	return tls_ctx->pending_open_record_frags;
 }
 
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+		      struct sk_buff *skb);
+
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
-	return sk_fullsock(sk) &&
-	       /* matches smp_store_release in tls_set_device_offload */
-	       smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+	return sk_fullsock(sk) &
+	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+		&tls_validate_xmit_skb);
+#else
+	return false;
+#endif
 }
 
 static inline void tls_err_abort(struct sock *sk, int err)
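
tls_is_sk_tx_device_offloaded() now keys off sk->sk_validate_xmit_skb instead of sk_destruct, which frees the destructor for RX teardown (note the new sk_destruct hook added to struct tls_context above). A hedged sketch of how a driver's transmit path might consult it, with the helper names invented:

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sock *sk = skb->sk;

        /* TLS TX state for this flow lives in our HW: no SW fallback needed */
        if (sk && tls_is_sk_tx_device_offloaded(sk))
                return mydrv_xmit_tls(skb, dev);        /* hypothetical */

        return mydrv_xmit_plain(skb, dev);              /* hypothetical */
}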
@@ -387,10 +420,27 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
 	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
 }
 
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
+{
+	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
+}
+
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+	atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
 		      unsigned char *record_type);
 void tls_register_device(struct tls_device *device);
 void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 		struct scatterlist *sgout);
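
tls_offload_rx_resync_request() packs the TCP sequence number into the upper 32 bits of resync_req and sets bit 0 as a "request pending" flag, so the driver's RX path can post a resync request without taking locks; per the commit message, tls_read_size later observes it and calls back into the driver (handle_device_resync below). A hypothetical driver-side trigger:

/* Hypothetical: called from a driver's RX completion path when HW
 * reports it has lost TLS record framing for this flow.
 */
static void mydrv_request_rx_resync(struct sock *sk, u32 tcp_seq_of_record)
{
        /* seq lands in the upper 32 bits; bit 0 marks the request pending */
        tls_offload_rx_resync_request(sk, htonl(tcp_seq_of_record));
}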

@@ -402,4 +452,9 @@ int tls_sw_fallback_init(struct sock *sk,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info);
 
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
 #endif /* _TLS_OFFLOAD_H */
