LCOV - code coverage report
Current view:  top level - include/net - tls.h (source / functions)
Test:          landlock.info
Date:          2021-04-22 12:43:58
Coverage:      Lines: 0 of 9 hit (0.0 %)    Functions: 0 of 0 hit (-)

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
       3             :  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
       4             :  *
       5             :  * This software is available to you under a choice of one of two
       6             :  * licenses.  You may choose to be licensed under the terms of the GNU
       7             :  * General Public License (GPL) Version 2, available from the file
       8             :  * COPYING in the main directory of this source tree, or the
       9             :  * OpenIB.org BSD license below:
      10             :  *
      11             :  *     Redistribution and use in source and binary forms, with or
      12             :  *     without modification, are permitted provided that the following
      13             :  *     conditions are met:
      14             :  *
      15             :  *      - Redistributions of source code must retain the above
      16             :  *        copyright notice, this list of conditions and the following
      17             :  *        disclaimer.
      18             :  *
      19             :  *      - Redistributions in binary form must reproduce the above
      20             :  *        copyright notice, this list of conditions and the following
      21             :  *        disclaimer in the documentation and/or other materials
      22             :  *        provided with the distribution.
      23             :  *
      24             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
      25             :  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      26             :  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      27             :  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
      28             :  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
      29             :  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
      30             :  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      31             :  * SOFTWARE.
      32             :  */
      33             : 
      34             : #ifndef _TLS_OFFLOAD_H
      35             : #define _TLS_OFFLOAD_H
      36             : 
      37             : #include <linux/types.h>
      38             : #include <asm/byteorder.h>
      39             : #include <linux/crypto.h>
      40             : #include <linux/socket.h>
      41             : #include <linux/tcp.h>
      42             : #include <linux/skmsg.h>
      43             : #include <linux/mutex.h>
      44             : #include <linux/netdevice.h>
      45             : #include <linux/rcupdate.h>
      46             : 
      47             : #include <net/net_namespace.h>
      48             : #include <net/tcp.h>
      49             : #include <net/strparser.h>
      50             : #include <crypto/aead.h>
      51             : #include <uapi/linux/tls.h>
      52             : 
      53             : 
      54             : /* Maximum data size carried in a TLS record */
      55             : #define TLS_MAX_PAYLOAD_SIZE            ((size_t)1 << 14)
      56             : 
      57             : #define TLS_HEADER_SIZE                 5
      58             : #define TLS_NONCE_OFFSET                TLS_HEADER_SIZE
      59             : 
      60             : #define TLS_CRYPTO_INFO_READY(info)     ((info)->cipher_type)
      61             : 
      62             : #define TLS_RECORD_TYPE_DATA            0x17
      63             : 
      64             : #define TLS_AAD_SPACE_SIZE              13
      65             : 
      66             : #define MAX_IV_SIZE                     16
      67             : #define TLS_MAX_REC_SEQ_SIZE            8
      68             : 
       69             : /* For AES-CCM, the full 16 bytes of IV are made of 4 fields of the given sizes.
      70             :  *
      71             :  * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
      72             :  *
      73             :  * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
      74             :  * Hence b0 contains (3 - 1) = 2.
      75             :  */
      76             : #define TLS_AES_CCM_IV_B0_BYTE          2
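
To make the CCM layout concrete, here is a minimal userspace sketch that packs the 16-byte IV described above; ccm_iv_pack and its parameter names are illustrative, not part of this header:

    #include <stdint.h>
    #include <string.h>

    /* Pack IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3].
     * b0 encodes (length width - 1), i.e. 3 - 1 = 2.
     */
    static void ccm_iv_pack(uint8_t iv[16], const uint8_t salt[4],
                            const uint8_t nonce[8])
    {
            iv[0] = 2;                      /* TLS_AES_CCM_IV_B0_BYTE */
            memcpy(iv + 1, salt, 4);        /* implicit nonce (key salt) */
            memcpy(iv + 5, nonce, 8);       /* explicit nonce (record seq) */
            memset(iv + 13, 0, 3);          /* 3-byte length, set by the cipher */
    }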
      77             : 
      78             : #define __TLS_INC_STATS(net, field)                             \
      79             :         __SNMP_INC_STATS((net)->mib.tls_statistics, field)
      80             : #define TLS_INC_STATS(net, field)                               \
      81             :         SNMP_INC_STATS((net)->mib.tls_statistics, field)
      82             : #define __TLS_DEC_STATS(net, field)                             \
      83             :         __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
      84             : #define TLS_DEC_STATS(net, field)                               \
      85             :         SNMP_DEC_STATS((net)->mib.tls_statistics, field)
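
These wrap the generic per-netns SNMP helpers. A typical call site bumps one of the TLS counters from uapi/linux/snmp.h, e.g. on a decryption failure (sketch, modeled on tls_sw.c):

    TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);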
      86             : 
      87             : enum {
      88             :         TLS_BASE,
      89             :         TLS_SW,
      90             :         TLS_HW,
      91             :         TLS_HW_RECORD,
      92             :         TLS_NUM_CONFIG,
      93             : };
      94             : 
      95             : /* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
      96             :  * allocated or mapped for each TLS record. After encryption, the records are
      97             :  * stored in a linked list.
      98             :  */
      99             : struct tls_rec {
     100             :         struct list_head list;
     101             :         int tx_ready;
     102             :         int tx_flags;
     103             : 
     104             :         struct sk_msg msg_plaintext;
     105             :         struct sk_msg msg_encrypted;
     106             : 
     107             :         /* AAD | msg_plaintext.sg.data | sg_tag */
     108             :         struct scatterlist sg_aead_in[2];
     109             :         /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
     110             :         struct scatterlist sg_aead_out[2];
     111             : 
     112             :         char content_type;
     113             :         struct scatterlist sg_content_type;
     114             : 
     115             :         char aad_space[TLS_AAD_SPACE_SIZE];
     116             :         u8 iv_data[MAX_IV_SIZE];
     117             :         struct aead_request aead_req;
     118             :         u8 aead_req_ctx[];
     119             : };
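
The two-entry scatterlists let the AAD be prepended to the sk_msg pages without copying: entry 0 points at aad_space and entry 1 is chained to the message data. A sketch of the TX-side initialization, modeled on tls_sw.c (prot stands for the connection's tls_prot_info):

    sg_init_table(rec->sg_aead_in, 2);
    sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
    sg_unmark_end(&rec->sg_aead_in[1]);   /* entry 1 chains to msg_plaintext */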
     120             : 
     121             : struct tls_msg {
     122             :         struct strp_msg rxm;
     123             :         u8 control;
     124             : };
     125             : 
     126             : struct tx_work {
     127             :         struct delayed_work work;
     128             :         struct sock *sk;
     129             : };
     130             : 
     131             : struct tls_sw_context_tx {
     132             :         struct crypto_aead *aead_send;
     133             :         struct crypto_wait async_wait;
     134             :         struct tx_work tx_work;
     135             :         struct tls_rec *open_rec;
     136             :         struct list_head tx_list;
     137             :         atomic_t encrypt_pending;
     138             :         /* protect crypto_wait with encrypt_pending */
     139             :         spinlock_t encrypt_compl_lock;
     140             :         int async_notify;
     141             :         u8 async_capable:1;
     142             : 
     143             : #define BIT_TX_SCHEDULED        0
     144             : #define BIT_TX_CLOSING          1
     145             :         unsigned long tx_bitmask;
     146             : };
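
BIT_TX_SCHEDULED guards against queueing the transmit worker twice; the usual pattern (cf. tls_sw.c) looks like this sketch:

    /* Schedule the TX worker only if it is not already pending. */
    if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
            schedule_delayed_work(&ctx->tx_work.work, 1);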
     147             : 
     148             : struct tls_sw_context_rx {
     149             :         struct crypto_aead *aead_recv;
     150             :         struct crypto_wait async_wait;
     151             :         struct strparser strp;
     152             :         struct sk_buff_head rx_list;    /* list of decrypted 'data' records */
     153             :         void (*saved_data_ready)(struct sock *sk);
     154             : 
     155             :         struct sk_buff *recv_pkt;
     156             :         u8 control;
     157             :         u8 async_capable:1;
     158             :         u8 decrypted:1;
     159             :         atomic_t decrypt_pending;
      160             :         /* protect crypto_wait with decrypt_pending */
     161             :         spinlock_t decrypt_compl_lock;
     162             :         bool async_notify;
     163             : };
     164             : 
     165             : struct tls_record_info {
     166             :         struct list_head list;
     167             :         u32 end_seq;
     168             :         int len;
     169             :         int num_frags;
     170             :         skb_frag_t frags[MAX_SKB_FRAGS];
     171             : };
     172             : 
     173             : struct tls_offload_context_tx {
     174             :         struct crypto_aead *aead_send;
     175             :         spinlock_t lock;        /* protects records list */
     176             :         struct list_head records_list;
     177             :         struct tls_record_info *open_record;
     178             :         struct tls_record_info *retransmit_hint;
     179             :         u64 hint_record_sn;
     180             :         u64 unacked_record_sn;
     181             : 
     182             :         struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
     183             :         void (*sk_destruct)(struct sock *sk);
     184             :         u8 driver_state[] __aligned(8);
     185             :         /* The TLS layer reserves room for driver specific state
     186             :          * Currently the belief is that there is not enough
     187             :          * driver specific state to justify another layer of indirection
     188             :          */
     189             : #define TLS_DRIVER_STATE_SIZE_TX        16
     190             : };
     191             : 
     192             : #define TLS_OFFLOAD_CONTEXT_SIZE_TX                                            \
     193             :         (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
     194             : 
     195             : enum tls_context_flags {
     196             :         TLS_RX_SYNC_RUNNING = 0,
      197             :         /* Unlike RX, where resync is driven entirely by the core, in TX only
      198             :          * the driver knows when things went out of sync, so we need the flag
      199             :          * to be atomic.
     200             :          */
     201             :         TLS_TX_SYNC_SCHED = 1,
     202             :         /* tls_dev_del was called for the RX side, device state was released,
     203             :          * but tls_ctx->netdev might still be kept, because TX-side driver
     204             :          * resources might not be released yet. Used to prevent the second
     205             :          * tls_dev_del call in tls_device_down if it happens simultaneously.
     206             :          */
     207             :         TLS_RX_DEV_CLOSED = 2,
     208             : };
     209             : 
     210             : struct cipher_context {
     211             :         char *iv;
     212             :         char *rec_seq;
     213             : };
     214             : 
     215             : union tls_crypto_context {
     216             :         struct tls_crypto_info info;
     217             :         union {
     218             :                 struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
     219             :                 struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
     220             :                 struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
     221             :         };
     222             : };
     223             : 
     224             : struct tls_prot_info {
     225             :         u16 version;
     226             :         u16 cipher_type;
     227             :         u16 prepend_size;
     228             :         u16 tag_size;
     229             :         u16 overhead_size;
     230             :         u16 iv_size;
     231             :         u16 salt_size;
     232             :         u16 rec_seq_size;
     233             :         u16 aad_size;
     234             :         u16 tail_size;
     235             : };
     236             : 
     237             : struct tls_context {
     238             :         /* read-only cache line */
     239             :         struct tls_prot_info prot_info;
     240             : 
     241             :         u8 tx_conf:3;
     242             :         u8 rx_conf:3;
     243             : 
     244             :         int (*push_pending_record)(struct sock *sk, int flags);
     245             :         void (*sk_write_space)(struct sock *sk);
     246             : 
     247             :         void *priv_ctx_tx;
     248             :         void *priv_ctx_rx;
     249             : 
     250             :         struct net_device *netdev;
     251             : 
     252             :         /* rw cache line */
     253             :         struct cipher_context tx;
     254             :         struct cipher_context rx;
     255             : 
     256             :         struct scatterlist *partially_sent_record;
     257             :         u16 partially_sent_offset;
     258             : 
     259             :         bool in_tcp_sendpages;
     260             :         bool pending_open_record_frags;
     261             : 
     262             :         struct mutex tx_lock; /* protects partially_sent_* fields and
     263             :                                * per-type TX fields
     264             :                                */
     265             :         unsigned long flags;
     266             : 
     267             :         /* cache cold stuff */
     268             :         struct proto *sk_proto;
     269             : 
     270             :         void (*sk_destruct)(struct sock *sk);
     271             : 
     272             :         union tls_crypto_context crypto_send;
     273             :         union tls_crypto_context crypto_recv;
     274             : 
     275             :         struct list_head list;
     276             :         refcount_t refcount;
     277             :         struct rcu_head rcu;
     278             : };
     279             : 
     280             : enum tls_offload_ctx_dir {
     281             :         TLS_OFFLOAD_CTX_DIR_RX,
     282             :         TLS_OFFLOAD_CTX_DIR_TX,
     283             : };
     284             : 
     285             : struct tlsdev_ops {
     286             :         int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
     287             :                            enum tls_offload_ctx_dir direction,
     288             :                            struct tls_crypto_info *crypto_info,
     289             :                            u32 start_offload_tcp_sn);
     290             :         void (*tls_dev_del)(struct net_device *netdev,
     291             :                             struct tls_context *ctx,
     292             :                             enum tls_offload_ctx_dir direction);
     293             :         int (*tls_dev_resync)(struct net_device *netdev,
     294             :                               struct sock *sk, u32 seq, u8 *rcd_sn,
     295             :                               enum tls_offload_ctx_dir direction);
     296             : };
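
A NIC driver advertises these hooks through net_device->tlsdev_ops. A minimal, hypothetical skeleton (the foo_* names are illustrative and error handling is elided):

    static int foo_tls_dev_add(struct net_device *netdev, struct sock *sk,
                               enum tls_offload_ctx_dir direction,
                               struct tls_crypto_info *crypto_info,
                               u32 start_offload_tcp_sn)
    {
            if (crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)
                    return -EOPNOTSUPP;
            /* program the key and start_offload_tcp_sn into hardware */
            return 0;
    }

    static const struct tlsdev_ops foo_tlsdev_ops = {
            .tls_dev_add    = foo_tls_dev_add,
            /* .tls_dev_del and .tls_dev_resync omitted from this sketch */
    };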
     297             : 
     298             : enum tls_offload_sync_type {
     299             :         TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
     300             :         TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
     301             :         TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
     302             : };
     303             : 
     304             : #define TLS_DEVICE_RESYNC_NH_START_IVAL         2
     305             : #define TLS_DEVICE_RESYNC_NH_MAX_IVAL           128
     306             : 
     307             : #define TLS_DEVICE_RESYNC_ASYNC_LOGMAX          13
     308             : struct tls_offload_resync_async {
     309             :         atomic64_t req;
     310             :         u16 loglen;
     311             :         u16 rcd_delta;
     312             :         u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
     313             : };
     314             : 
     315             : struct tls_offload_context_rx {
     316             :         /* sw must be the first member of tls_offload_context_rx */
     317             :         struct tls_sw_context_rx sw;
     318             :         enum tls_offload_sync_type resync_type;
     319             :         /* this member is set regardless of resync_type, to avoid branches */
     320             :         u8 resync_nh_reset:1;
     321             :         /* CORE_NEXT_HINT-only member, but use the hole here */
     322             :         u8 resync_nh_do_now:1;
     323             :         union {
     324             :                 /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
     325             :                 struct {
     326             :                         atomic64_t resync_req;
     327             :                 };
     328             :                 /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
     329             :                 struct {
     330             :                         u32 decrypted_failed;
     331             :                         u32 decrypted_tgt;
     332             :                 } resync_nh;
     333             :                 /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
     334             :                 struct {
     335             :                         struct tls_offload_resync_async *resync_async;
     336             :                 };
     337             :         };
     338             :         u8 driver_state[] __aligned(8);
     339             :         /* The TLS layer reserves room for driver specific state
     340             :          * Currently the belief is that there is not enough
     341             :          * driver specific state to justify another layer of indirection
     342             :          */
     343             : #define TLS_DRIVER_STATE_SIZE_RX        8
     344             : };
     345             : 
     346             : #define TLS_OFFLOAD_CONTEXT_SIZE_RX                                     \
     347             :         (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
     348             : 
     349             : struct tls_context *tls_ctx_create(struct sock *sk);
     350             : void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
     351             : void update_sk_prot(struct sock *sk, struct tls_context *ctx);
     352             : 
     353             : int wait_on_pending_writer(struct sock *sk, long *timeo);
     354             : int tls_sk_query(struct sock *sk, int optname, char __user *optval,
     355             :                 int __user *optlen);
     356             : int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
     357             :                   unsigned int optlen);
     358             : 
     359             : int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
     360             : void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
     361             : void tls_sw_strparser_done(struct tls_context *tls_ctx);
     362             : int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
     363             : int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
     364             :                            int offset, size_t size, int flags);
     365             : int tls_sw_sendpage(struct sock *sk, struct page *page,
     366             :                     int offset, size_t size, int flags);
     367             : void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
     368             : void tls_sw_release_resources_tx(struct sock *sk);
     369             : void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
     370             : void tls_sw_free_resources_rx(struct sock *sk);
     371             : void tls_sw_release_resources_rx(struct sock *sk);
     372             : void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
     373             : int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
     374             :                    int nonblock, int flags, int *addr_len);
     375             : bool tls_sw_stream_read(const struct sock *sk);
     376             : ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
     377             :                            struct pipe_inode_info *pipe,
     378             :                            size_t len, unsigned int flags);
     379             : 
     380             : int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
     381             : int tls_device_sendpage(struct sock *sk, struct page *page,
     382             :                         int offset, size_t size, int flags);
     383             : int tls_tx_records(struct sock *sk, int flags);
     384             : 
     385             : struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
     386             :                                        u32 seq, u64 *p_record_sn);
     387             : 
     388             : static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
     389             : {
     390             :         return rec->len == 0;
     391             : }
     392             : 
     393             : static inline u32 tls_record_start_seq(struct tls_record_info *rec)
     394             : {
     395             :         return rec->end_seq - rec->len;
     396             : }
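
Drivers use these helpers on the TX retransmit path: under the offload context's lock, tls_get_record() finds the record covering a retransmitted TCP sequence, and the start marker (the zero-length record queued at offload setup) signals data that predates the offload. A sketch with illustrative variable names:

    spin_lock_irqsave(&context->lock, flags);
    record = tls_get_record(context, tcp_seq, &record_sn);
    if (record && !tls_record_is_start_marker(record)) {
            /* re-encrypt from tls_record_start_seq(record) onward */
    }
    spin_unlock_irqrestore(&context->lock, flags);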
     397             : 
     398             : int tls_push_sg(struct sock *sk, struct tls_context *ctx,
     399             :                 struct scatterlist *sg, u16 first_offset,
     400             :                 int flags);
     401             : int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
     402             :                             int flags);
     403             : void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
     404             : 
     405             : static inline struct tls_msg *tls_msg(struct sk_buff *skb)
     406             : {
     407             :         return (struct tls_msg *)strp_msg(skb);
     408             : }
     409             : 
     410             : static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
     411             : {
     412             :         return !!ctx->partially_sent_record;
     413             : }
     414             : 
     415             : static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
     416             : {
     417             :         return tls_ctx->pending_open_record_frags;
     418             : }
     419             : 
     420             : static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
     421             : {
     422             :         struct tls_rec *rec;
     423             : 
      424             :         rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
     425             :         if (!rec)
     426             :                 return false;
     427             : 
     428             :         return READ_ONCE(rec->tx_ready);
     429             : }
     430             : 
     431             : static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
     432             : {
     433             :         u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
     434             : 
     435             :         switch (config) {
     436             :         case TLS_BASE:
     437             :                 return TLS_CONF_BASE;
     438             :         case TLS_SW:
     439             :                 return TLS_CONF_SW;
     440             :         case TLS_HW:
     441             :                 return TLS_CONF_HW;
     442             :         case TLS_HW_RECORD:
     443             :                 return TLS_CONF_HW_RECORD;
     444             :         }
     445             :         return 0;
     446             : }
     447             : 
     448             : struct sk_buff *
     449             : tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
     450             :                       struct sk_buff *skb);
     451             : 
     452             : static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
     453             : {
     454             : #ifdef CONFIG_SOCK_VALIDATE_XMIT
     455             :         return sk_fullsock(sk) &&
     456             :                (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
     457             :                &tls_validate_xmit_skb);
     458             : #else
     459             :         return false;
     460             : #endif
     461             : }
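
This check works because tls_set_device_offload() points sk_validate_xmit_skb at tls_validate_xmit_skb(). A driver xmit path can therefore gate its offload as in this sketch:

    if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) {
            /* per-connection state is reachable via tls_driver_ctx() */
    }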
     462             : 
     463             : static inline void tls_err_abort(struct sock *sk, int err)
     464             : {
     465             :         sk->sk_err = err;
     466             :         sk->sk_error_report(sk);
     467             : }
     468             : 
     469             : static inline bool tls_bigint_increment(unsigned char *seq, int len)
     470             : {
     471             :         int i;
     472             : 
     473             :         for (i = len - 1; i >= 0; i--) {
     474             :                 ++seq[i];
     475             :                 if (seq[i] != 0)
     476             :                         break;
     477             :         }
     478             : 
     479             :         return (i == -1);
     480             : }
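
The sequence is treated as a big-endian counter and the return value flags wrap-around; for example (userspace-style sketch):

    unsigned char seq[8] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    bool wrapped = tls_bigint_increment(seq, sizeof(seq));
    /* wrapped == true and seq is now all zeroes; tls_advance_record_sn()
     * below turns exactly this condition into an EBADMSG abort.
     */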
     481             : 
     482             : static inline void tls_bigint_subtract(unsigned char *seq, int  n)
     483             : {
     484             :         u64 rcd_sn;
     485             :         __be64 *p;
     486             : 
     487             :         BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
     488             : 
     489             :         p = (__be64 *)seq;
     490             :         rcd_sn = be64_to_cpu(*p);
     491             :         *p = cpu_to_be64(rcd_sn - n);
     492             : }
     493             : 
     494           0 : static inline struct tls_context *tls_get_ctx(const struct sock *sk)
     495             : {
     496           0 :         struct inet_connection_sock *icsk = inet_csk(sk);
     497             : 
      498             :         /* Use RCU on icsk_ulp_data only for the sock diag code;
      499             :          * the TLS data path doesn't need rcu_dereference().
     500             :          */
     501           0 :         return (__force void *)icsk->icsk_ulp_data;
     502             : }
     503             : 
     504             : static inline void tls_advance_record_sn(struct sock *sk,
     505             :                                          struct tls_prot_info *prot,
     506             :                                          struct cipher_context *ctx)
     507             : {
     508             :         if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
     509             :                 tls_err_abort(sk, EBADMSG);
     510             : 
     511             :         if (prot->version != TLS_1_3_VERSION &&
     512             :             prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
     513             :                 tls_bigint_increment(ctx->iv + prot->salt_size,
     514             :                                      prot->iv_size);
     515             : }
     516             : 
     517             : static inline void tls_fill_prepend(struct tls_context *ctx,
     518             :                              char *buf,
     519             :                              size_t plaintext_len,
     520             :                              unsigned char record_type)
     521             : {
     522             :         struct tls_prot_info *prot = &ctx->prot_info;
     523             :         size_t pkt_len, iv_size = prot->iv_size;
     524             : 
     525             :         pkt_len = plaintext_len + prot->tag_size;
     526             :         if (prot->version != TLS_1_3_VERSION &&
     527             :             prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
     528             :                 pkt_len += iv_size;
     529             : 
     530             :                 memcpy(buf + TLS_NONCE_OFFSET,
     531             :                        ctx->tx.iv + prot->salt_size, iv_size);
     532             :         }
     533             : 
      534             :         /* we cover the explicit nonce here as well, so buf should be of
      535             :          * size TLS_HEADER_SIZE + the explicit nonce size (prot->iv_size)
      536             :          */
     537             :         buf[0] = prot->version == TLS_1_3_VERSION ?
     538             :                    TLS_RECORD_TYPE_DATA : record_type;
     539             :         /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
     540             :         buf[1] = TLS_1_2_VERSION_MINOR;
     541             :         buf[2] = TLS_1_2_VERSION_MAJOR;
     542             :         /* we can use IV for nonce explicit according to spec */
     543             :         buf[3] = pkt_len >> 8;
     544             :         buf[4] = pkt_len & 0xFF;
     545             : }
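
A worked example of the arithmetic above, for TLS 1.2 / AES-GCM-128 (iv_size = 8, tag_size = 16):

    /* plaintext_len = 100  =>  pkt_len = 100 + 16 + 8 = 124 = 0x007c
     * buf[0..4] = 17 03 03 00 7c, followed by the 8-byte explicit nonce
     * copied in at TLS_NONCE_OFFSET (byte 5).
     */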
     546             : 
     547             : static inline void tls_make_aad(char *buf,
     548             :                                 size_t size,
     549             :                                 char *record_sequence,
     550             :                                 unsigned char record_type,
     551             :                                 struct tls_prot_info *prot)
     552             : {
     553             :         if (prot->version != TLS_1_3_VERSION) {
     554             :                 memcpy(buf, record_sequence, prot->rec_seq_size);
     555             :                 buf += 8;
     556             :         } else {
     557             :                 size += prot->tag_size;
     558             :         }
     559             : 
     560             :         buf[0] = prot->version == TLS_1_3_VERSION ?
     561             :                   TLS_RECORD_TYPE_DATA : record_type;
     562             :         buf[1] = TLS_1_2_VERSION_MAJOR;
     563             :         buf[2] = TLS_1_2_VERSION_MINOR;
     564             :         buf[3] = size >> 8;
     565             :         buf[4] = size & 0xFF;
     566             : }
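
A worked example of the resulting AAD:

    /* TLS 1.2, record_type 0x17, size = 100, rec_seq = 00 .. 00 01:
     *   AAD[13] = 00 00 00 00 00 00 00 01 | 17 03 03 00 64
     * TLS 1.3: the sequence bytes are omitted and size grows by tag_size,
     * so the AAD is just the 5-byte record header, 17 03 03 (size + tag_size).
     */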
     567             : 
     568             : static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
     569             : {
     570             :         int i;
     571             : 
     572             :         if (prot->version == TLS_1_3_VERSION ||
     573             :             prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
     574             :                 for (i = 0; i < 8; i++)
     575             :                         iv[i + 4] ^= seq[i];
     576             :         }
     577             : }
     578             : 
     579             : 
     580           0 : static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
     581             :                 const struct tls_context *tls_ctx)
     582             : {
     583           0 :         return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
     584             : }
     585             : 
     586             : static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
     587             :                 const struct tls_context *tls_ctx)
     588             : {
     589             :         return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
     590             : }
     591             : 
     592             : static inline struct tls_offload_context_tx *
     593             : tls_offload_ctx_tx(const struct tls_context *tls_ctx)
     594             : {
     595             :         return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
     596             : }
     597             : 
     598             : static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
     599             : {
     600             :         struct tls_context *ctx = tls_get_ctx(sk);
     601             : 
     602             :         if (!ctx)
     603             :                 return false;
     604             :         return !!tls_sw_ctx_tx(ctx);
     605             : }
     606             : 
     607           0 : static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
     608             : {
     609           0 :         struct tls_context *ctx = tls_get_ctx(sk);
     610             : 
     611           0 :         if (!ctx)
     612             :                 return false;
     613           0 :         return !!tls_sw_ctx_rx(ctx);
     614             : }
     615             : 
     616             : void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
     617             : void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
     618             : 
     619             : static inline struct tls_offload_context_rx *
     620             : tls_offload_ctx_rx(const struct tls_context *tls_ctx)
     621             : {
     622             :         return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
     623             : }
     624             : 
     625             : #if IS_ENABLED(CONFIG_TLS_DEVICE)
     626             : static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
     627             :                                      enum tls_offload_ctx_dir direction)
     628             : {
     629             :         if (direction == TLS_OFFLOAD_CTX_DIR_TX)
     630             :                 return tls_offload_ctx_tx(tls_ctx)->driver_state;
     631             :         else
     632             :                 return tls_offload_ctx_rx(tls_ctx)->driver_state;
     633             : }
     634             : 
     635             : static inline void *
     636             : tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
     637             : {
     638             :         return __tls_driver_ctx(tls_get_ctx(sk), direction);
     639             : }
     640             : #endif
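
A driver may lay its own per-connection structure over the reserved driver_state[] area, as long as it fits in TLS_DRIVER_STATE_SIZE_TX/_RX bytes. A hypothetical sketch (CONFIG_TLS_DEVICE assumed, foo_* names illustrative):

    struct foo_tls_tx_state {
            u32 stream_id;          /* hypothetical hardware handle */
    };

    static struct foo_tls_tx_state *foo_tls_tx_state(struct sock *sk)
    {
            BUILD_BUG_ON(sizeof(struct foo_tls_tx_state) >
                         TLS_DRIVER_STATE_SIZE_TX);
            return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
    }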
     641             : 
     642             : #define RESYNC_REQ BIT(0)
     643             : #define RESYNC_REQ_ASYNC BIT(1)
     644             : /* The TLS context is valid until sk_destruct is called */
     645             : static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
     646             : {
     647             :         struct tls_context *tls_ctx = tls_get_ctx(sk);
     648             :         struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
     649             : 
     650             :         atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
     651             : }
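
A driver that has spotted a plausible record header at TCP sequence hw_seq (host byte order, an illustrative name) asks the core to confirm and resynchronize at that point:

    tls_offload_rx_resync_request(sk, htonl(hw_seq));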
     652             : 
     653             : /* Log all TLS record header TCP sequences in [seq, seq+len] */
     654             : static inline void
     655             : tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
     656             : {
     657             :         struct tls_context *tls_ctx = tls_get_ctx(sk);
     658             :         struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
     659             : 
     660             :         atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
     661             :                      ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
     662             :         rx_ctx->resync_async->loglen = 0;
     663             :         rx_ctx->resync_async->rcd_delta = 0;
     664             : }
     665             : 
     666             : static inline void
     667             : tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
     668             : {
     669             :         struct tls_context *tls_ctx = tls_get_ctx(sk);
     670             :         struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
     671             : 
     672             :         atomic64_set(&rx_ctx->resync_async->req,
     673             :                      ((u64)ntohl(seq) << 32) | RESYNC_REQ);
     674             : }
     675             : 
     676             : static inline void
     677             : tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
     678             : {
     679             :         struct tls_context *tls_ctx = tls_get_ctx(sk);
     680             : 
     681             :         tls_offload_ctx_rx(tls_ctx)->resync_type = type;
     682             : }
     683             : 
      684             : /* Driver's seq tracking has to be disabled until resync succeeds */
     685             : static inline bool tls_offload_tx_resync_pending(struct sock *sk)
     686             : {
     687             :         struct tls_context *tls_ctx = tls_get_ctx(sk);
     688             :         bool ret;
     689             : 
     690             :         ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
     691             :         smp_mb__after_atomic();
     692             :         return ret;
     693             : }
     694             : 
     695             : int __net_init tls_proc_init(struct net *net);
     696             : void __net_exit tls_proc_fini(struct net *net);
     697             : 
     698             : int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
     699             :                       unsigned char *record_type);
     700             : int decrypt_skb(struct sock *sk, struct sk_buff *skb,
     701             :                 struct scatterlist *sgout);
     702             : struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
     703             : 
     704             : int tls_sw_fallback_init(struct sock *sk,
     705             :                          struct tls_offload_context_tx *offload_ctx,
     706             :                          struct tls_crypto_info *crypto_info);
     707             : 
     708             : #ifdef CONFIG_TLS_DEVICE
     709             : void tls_device_init(void);
     710             : void tls_device_cleanup(void);
     711             : void tls_device_sk_destruct(struct sock *sk);
     712             : int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
     713             : void tls_device_free_resources_tx(struct sock *sk);
     714             : int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
     715             : void tls_device_offload_cleanup_rx(struct sock *sk);
     716             : void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
     717             : void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
     718             : int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
     719             :                          struct sk_buff *skb, struct strp_msg *rxm);
     720             : 
     721             : static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
     722             : {
     723             :         if (!sk_fullsock(sk) ||
     724             :             smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
     725             :                 return false;
     726             :         return tls_get_ctx(sk)->rx_conf == TLS_HW;
     727             : }
     728             : #else
     729             : static inline void tls_device_init(void) {}
     730             : static inline void tls_device_cleanup(void) {}
     731             : 
     732             : static inline int
     733             : tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
     734             : {
     735             :         return -EOPNOTSUPP;
     736             : }
     737             : 
     738             : static inline void tls_device_free_resources_tx(struct sock *sk) {}
     739             : 
     740             : static inline int
     741             : tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
     742             : {
     743             :         return -EOPNOTSUPP;
     744             : }
     745             : 
     746             : static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
     747             : static inline void
     748             : tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
     749             : 
     750             : static inline int
     751             : tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
     752             :                      struct sk_buff *skb, struct strp_msg *rxm)
     753             : {
     754             :         return 0;
     755             : }
     756             : #endif
     757             : #endif /* _TLS_OFFLOAD_H */

Generated by: LCOV version 1.14