@@ -39,7 +39,6 @@
 #include <linux/crypto.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
-#include <linux/skmsg.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
@@ -50,6 +49,7 @@
 #include <crypto/aead.h>
 #include <uapi/linux/tls.h>

+struct tls_rec;

 /* Maximum data size carried in a TLS record */
 #define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)
@@ -78,13 +78,6 @@
 #define TLS_AES_CCM_IV_B0_BYTE		2
 #define TLS_SM4_CCM_IV_B0_BYTE		2

-#define __TLS_INC_STATS(net, field)				\
-	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define TLS_INC_STATS(net, field)				\
-	SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define TLS_DEC_STATS(net, field)				\
-	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
-
 enum {
 	TLS_BASE,
 	TLS_SW,
@@ -93,32 +86,6 @@ enum {
 	TLS_NUM_CONFIG,
 };

-/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
- * allocated or mapped for each TLS record. After encryption, the records are
- * stores in a linked list.
- */
-struct tls_rec {
-	struct list_head list;
-	int tx_ready;
-	int tx_flags;
-
-	struct sk_msg msg_plaintext;
-	struct sk_msg msg_encrypted;
-
-	/* AAD | msg_plaintext.sg.data | sg_tag */
-	struct scatterlist sg_aead_in[2];
-	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
-	struct scatterlist sg_aead_out[2];
-
-	char content_type;
-	struct scatterlist sg_content_type;
-
-	char aad_space[TLS_AAD_SPACE_SIZE];
-	u8 iv_data[MAX_IV_SIZE];
-	struct aead_request aead_req;
-	u8 aead_req_ctx[];
-};
-
 struct tx_work {
 	struct delayed_work work;
 	struct sock *sk;
@@ -349,44 +316,6 @@ struct tls_offload_context_rx {
 #define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
 	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

-struct tls_context *tls_ctx_create(struct sock *sk);
-void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
-void update_sk_prot(struct sock *sk, struct tls_context *ctx);
-
-int wait_on_pending_writer(struct sock *sk, long *timeo);
-int tls_sk_query(struct sock *sk, int optname, char __user *optval,
-		 int __user *optlen);
-int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
-		  unsigned int optlen);
-void tls_err_abort(struct sock *sk, int err);
-
-int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
-void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
-void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
-void tls_sw_strparser_done(struct tls_context *tls_ctx);
-int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
-			   int offset, size_t size, int flags);
-int tls_sw_sendpage(struct sock *sk, struct page *page,
-		    int offset, size_t size, int flags);
-void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
-void tls_sw_release_resources_tx(struct sock *sk);
-void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
-void tls_sw_free_resources_rx(struct sock *sk);
-void tls_sw_release_resources_rx(struct sock *sk);
-void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
-int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
-		   int flags, int *addr_len);
-bool tls_sw_sock_is_readable(struct sock *sk);
-ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
-			   struct pipe_inode_info *pipe,
-			   size_t len, unsigned int flags);
-
-int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-int tls_device_sendpage(struct sock *sk, struct page *page,
-			int offset, size_t size, int flags);
-int tls_tx_records(struct sock *sk, int flags);
-
 struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 				       u32 seq, u64 *p_record_sn);

@@ -400,58 +329,6 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
 	return rec->end_seq - rec->len;
 }

-int tls_push_sg(struct sock *sk, struct tls_context *ctx,
-		struct scatterlist *sg, u16 first_offset,
-		int flags);
-int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
-			    int flags);
-void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
-
-static inline struct tls_msg *tls_msg(struct sk_buff *skb)
-{
-	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
-
-	return &scb->tls;
-}
-
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
-	return !!ctx->partially_sent_record;
-}
-
-static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
-{
-	return tls_ctx->pending_open_record_frags;
-}
-
-static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
-{
-	struct tls_rec *rec;
-
-	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
-	if (!rec)
-		return false;
-
-	return READ_ONCE(rec->tx_ready);
-}
-
-static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
-{
-	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
-
-	switch (config) {
-	case TLS_BASE:
-		return TLS_CONF_BASE;
-	case TLS_SW:
-		return TLS_CONF_SW;
-	case TLS_HW:
-		return TLS_CONF_HW;
-	case TLS_HW_RECORD:
-		return TLS_CONF_HW_RECORD;
-	}
-	return 0;
-}
-
 struct sk_buff *
 tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 		      struct sk_buff *skb);
@@ -470,31 +347,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 #endif
 }

-static inline bool tls_bigint_increment(unsigned char *seq, int len)
-{
-	int i;
-
-	for (i = len - 1; i >= 0; i--) {
-		++seq[i];
-		if (seq[i] != 0)
-			break;
-	}
-
-	return (i == -1);
-}
-
-static inline void tls_bigint_subtract(unsigned char *seq, int n)
-{
-	u64 rcd_sn;
-	__be64 *p;
-
-	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
-
-	p = (__be64 *)seq;
-	rcd_sn = be64_to_cpu(*p);
-	*p = cpu_to_be64(rcd_sn - n);
-}
-
 static inline struct tls_context *tls_get_ctx(const struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -505,82 +357,6 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
 	return (__force void *)icsk->icsk_ulp_data;
 }

-static inline void tls_advance_record_sn(struct sock *sk,
-					 struct tls_prot_info *prot,
-					 struct cipher_context *ctx)
-{
-	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
-		tls_err_abort(sk, -EBADMSG);
-
-	if (prot->version != TLS_1_3_VERSION &&
-	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
-		tls_bigint_increment(ctx->iv + prot->salt_size,
-				     prot->iv_size);
-}
-
-static inline void tls_fill_prepend(struct tls_context *ctx,
-				    char *buf,
-				    size_t plaintext_len,
-				    unsigned char record_type)
-{
-	struct tls_prot_info *prot = &ctx->prot_info;
-	size_t pkt_len, iv_size = prot->iv_size;
-
-	pkt_len = plaintext_len + prot->tag_size;
-	if (prot->version != TLS_1_3_VERSION &&
-	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
-		pkt_len += iv_size;
-
-		memcpy(buf + TLS_NONCE_OFFSET,
-		       ctx->tx.iv + prot->salt_size, iv_size);
-	}
-
-	/* we cover nonce explicit here as well, so buf should be of
-	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
-	 */
-	buf[0] = prot->version == TLS_1_3_VERSION ?
-		   TLS_RECORD_TYPE_DATA : record_type;
-	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
-	buf[1] = TLS_1_2_VERSION_MINOR;
-	buf[2] = TLS_1_2_VERSION_MAJOR;
-	/* we can use IV for nonce explicit according to spec */
-	buf[3] = pkt_len >> 8;
-	buf[4] = pkt_len & 0xFF;
-}
-
-static inline void tls_make_aad(char *buf,
-				size_t size,
-				char *record_sequence,
-				unsigned char record_type,
-				struct tls_prot_info *prot)
-{
-	if (prot->version != TLS_1_3_VERSION) {
-		memcpy(buf, record_sequence, prot->rec_seq_size);
-		buf += 8;
-	} else {
-		size += prot->tag_size;
-	}
-
-	buf[0] = prot->version == TLS_1_3_VERSION ?
-		  TLS_RECORD_TYPE_DATA : record_type;
-	buf[1] = TLS_1_2_VERSION_MAJOR;
-	buf[2] = TLS_1_2_VERSION_MINOR;
-	buf[3] = size >> 8;
-	buf[4] = size & 0xFF;
-}
-
-static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
-{
-	int i;
-
-	if (prot->version == TLS_1_3_VERSION ||
-	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
-		for (i = 0; i < 8; i++)
-			iv[i + 4] ^= seq[i];
-	}
-}
-
-
 static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
 		const struct tls_context *tls_ctx)
 {
@@ -617,9 +393,6 @@ static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
 	return !!tls_sw_ctx_rx(ctx);
 }

-void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
-void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
-
 static inline struct tls_offload_context_rx *
 tls_offload_ctx_rx(const struct tls_context *tls_ctx)
 {
@@ -694,31 +467,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
 	return ret;
 }

-int __net_init tls_proc_init(struct net *net);
-void __net_exit tls_proc_fini(struct net *net);
-
-int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
-		      unsigned char *record_type);
-int decrypt_skb(struct sock *sk, struct sk_buff *skb,
-		struct scatterlist *sgout);
 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

-int tls_sw_fallback_init(struct sock *sk,
-			 struct tls_offload_context_tx *offload_ctx,
-			 struct tls_crypto_info *crypto_info);
-
 #ifdef CONFIG_TLS_DEVICE
-void tls_device_init(void);
-void tls_device_cleanup(void);
 void tls_device_sk_destruct(struct sock *sk);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
-void tls_device_free_resources_tx(struct sock *sk);
-int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
-void tls_device_offload_cleanup_rx(struct sock *sk);
-void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
 void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
-int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
-			 struct sk_buff *skb, struct strp_msg *rxm);

 static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
 {
@@ -727,33 +480,5 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
 		return false;
 	return tls_get_ctx(sk)->rx_conf == TLS_HW;
 }
-#else
-static inline void tls_device_init(void) {}
-static inline void tls_device_cleanup(void) {}
-
-static inline int
-tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void tls_device_free_resources_tx(struct sock *sk) {}
-
-static inline int
-tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
-static inline void
-tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
-
-static inline int
-tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
-		     struct sk_buff *skb, struct strp_msg *rxm)
-{
-	return 0;
-}
 #endif
 #endif /* _TLS_OFFLOAD_H */
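For context on the helpers this diff removes from the public header: the per-record AEAD nonce is derived by treating the 8-byte record sequence number as a big-endian integer, incrementing it after every record, and, for TLS 1.3 and ChaCha20-Poly1305, XOR-ing it into the IV starting at byte 4. The sketch below is not part of this commit and is not kernel code; it is a minimal userspace C rendering of what tls_bigint_increment() and xor_iv_with_seq() compute, with an assumed all-zero static IV used purely for illustration.

/*
 * Standalone sketch (not kernel code): mirrors the arithmetic of the
 * removed tls_bigint_increment() and xor_iv_with_seq() helpers.
 * Assumes an 8-byte big-endian record sequence number and a 12-byte
 * nonce, as used by the TLS 1.3 AEAD ciphers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Increment a big-endian integer in place; returns 1 on wrap-around. */
static int bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}
	return i == -1;
}

/* TLS 1.3 style nonce: XOR the sequence number into the last 8 IV bytes. */
static void xor_iv_with_seq(unsigned char iv[12], const unsigned char seq[8])
{
	for (int i = 0; i < 8; i++)
		iv[i + 4] ^= seq[i];
}

int main(void)
{
	unsigned char seq[8] = { 0 };	/* record sequence number */
	unsigned char iv[12] = { 0 };	/* static IV (all-zero for the demo) */

	for (int rec = 0; rec < 3; rec++) {
		unsigned char nonce[12];

		memcpy(nonce, iv, sizeof(nonce));
		xor_iv_with_seq(nonce, seq);

		printf("record %d nonce:", rec);
		for (size_t i = 0; i < sizeof(nonce); i++)
			printf(" %02x", nonce[i]);
		printf("\n");

		if (bigint_increment(seq, sizeof(seq)))
			fprintf(stderr, "sequence number wrapped\n");
	}
	return 0;
}

The kernel helpers update the cipher_context buffers in place; the sketch copies into a scratch nonce so the per-record derivation stays visible.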