LCOV - code coverage report
Current view: top level - include/net - request_sock.h (source / functions)
Test: landlock.info — line coverage: 61 of 65 lines hit (93.8 %)
Date: 2021-04-22 12:43:58 — function coverage: 7 of 7 functions hit (100.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /*
       3             :  * NET          Generic infrastructure for Network protocols.
       4             :  *
       5             :  *              Definitions for request_sock
       6             :  *
       7             :  * Authors:     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
       8             :  *
       9             :  *              From code originally in include/net/tcp.h
      10             :  */
      11             : #ifndef _REQUEST_SOCK_H
      12             : #define _REQUEST_SOCK_H
      13             : 
      14             : #include <linux/slab.h>
      15             : #include <linux/spinlock.h>
      16             : #include <linux/types.h>
      17             : #include <linux/bug.h>
      18             : #include <linux/refcount.h>
      19             : 
      20             : #include <net/sock.h>
      21             : 
      22             : struct request_sock;
      23             : struct sk_buff;
      24             : struct dst_entry;
      25             : struct proto;
      26             : 
/* Per-protocol operations and slab cache for managing request socks. */
struct request_sock_ops {
	int		family;		/* address family of the owning protocol */
	unsigned int	obj_size;	/* size of the objects carved from @slab */
	struct kmem_cache	*slab;	/* cache used by reqsk_alloc()/__reqsk_free() */
	char		*slab_name;	/* name the cache is registered under */
	/* Retransmit the SYN-ACK for a still-pending request
	 * (see inet_rtx_syn_ack() below).
	 */
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	/* Invoked by __reqsk_free() before the object goes back to @slab. */
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
      41             : 
      42             : int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
      43             : 
/* Saved copy of the headers of a received SYN.
 * NOTE(review): data[] presumably holds the mac, network and tcp headers
 * back to back, with the three lengths below describing the regions —
 * confirm against the code that fills this structure in.
 */
struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];	/* flexible array: the saved header bytes */
};
      50             : 
/* struct request_sock - mini sock to represent a connection request
 *
 * Begins with a struct sock_common so generic socket code can operate on
 * it; the rsk_* macros below are aliases for the embedded skc_* fields.
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;	/* next link; used by the accept FIFO
							 * in reqsk_queue_remove() */
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;	/* protocol ops; set by reqsk_alloc() */
	struct sock			*sk;		/* NULL right after allocation;
							 * presumably the created child socket */
	struct saved_syn		*saved_syn;	/* kfree()d by __reqsk_free() */
	u32				secid;		/* NOTE(review): looks like LSM security
							 * ids — confirm */
	u32				peer_secid;
};
      74             : 
      75           4 : static inline struct request_sock *inet_reqsk(const struct sock *sk)
      76             : {
      77           4 :         return (struct request_sock *)sk;
      78             : }
      79             : 
      80          28 : static inline struct sock *req_to_sk(struct request_sock *req)
      81             : {
      82          24 :         return (struct sock *)req;
      83             : }
      84             : 
/* Allocate and minimally initialize a request_sock from @ops->slab.
 *
 * When @attach_listener is true, a reference on @sk_listener is taken and
 * stored in rsk_listener (dropped again by __reqsk_free()); if the listener
 * is already dying (refcount hit zero) the fresh object is released and
 * NULL is returned. Returns NULL on allocation failure as well.
 */
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	/* GFP_ATOMIC: presumably callable from softirq context — hence
	 * also __GFP_NOWARN, failure here is handled gracefully.
	 */
	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		/* Only pin the listener if it still has references left. */
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	/* Starts with zero references; the caller sets the first one. */
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
     114             : 
     115           4 : static inline void __reqsk_free(struct request_sock *req)
     116             : {
     117           4 :         req->rsk_ops->destructor(req);
     118           4 :         if (req->rsk_listener)
     119           4 :                 sock_put(req->rsk_listener);
     120           4 :         kfree(req->saved_syn);
     121           4 :         kmem_cache_free(req->rsk_ops->slab, req);
     122           4 : }
     123             : 
     124           4 : static inline void reqsk_free(struct request_sock *req)
     125             : {
     126           4 :         WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
     127           4 :         __reqsk_free(req);
     128           4 : }
     129             : 
     130          12 : static inline void reqsk_put(struct request_sock *req)
     131             : {
     132          12 :         if (refcount_dec_and_test(&req->rsk_refcnt))
     133           4 :                 reqsk_free(req);
     134          12 : }
     135             : 
     136             : /*
     137             :  * For a TCP Fast Open listener -
     138             :  *      lock - protects the access to all the reqsk, which is co-owned by
     139             :  *              the listener and the child socket.
     140             :  *      qlen - pending TFO requests (still in TCP_SYN_RECV).
     141             :  *      max_qlen - max TFO reqs allowed before TFO is disabled.
     142             :  *
     143             :  *      XXX (TFO) - ideally these fields can be made as part of "listen_sock"
     144             :  *      structure above. But there is some implementation difficulty due to
     145             :  *      listen_sock being part of request_sock_queue hence will be freed when
     146             :  *      a listener is stopped. But TFO related fields may continue to be
     147             :  *      accessed even after a listener is closed, until its sk_refcnt drops
     148             :  *      to 0 implying no more outstanding TFO reqs. One solution is to keep
     149             :  *      listen_opt around until sk_refcnt drops to 0. But there is some other
     150             :  *      complexity that needs to be resolved. E.g., a listener can be disabled
     151             :  *      temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
     152             :  */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;		/* protects the reqsk access shared by
					 * listener and child (see comment above) */
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
     165             : 
     166             : /** struct request_sock_queue - queue of request_socks
     167             :  *
     168             :  * @rskq_accept_head - FIFO head of established children
     169             :  * @rskq_accept_tail - FIFO tail of established children
     170             :  * @rskq_defer_accept - User waits for some data after accept()
     171             :  *
     172             :  */
struct request_sock_queue {
	spinlock_t		rskq_lock;	/* serializes the accept FIFO below
						 * (see reqsk_queue_remove()) */
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;		/* pending reqs; maintained by
						 * reqsk_queue_added()/_removed() */
	atomic_t		young;		/* subset of qlen with num_timeout == 0 */

	struct request_sock	*rskq_accept_head;	/* FIFO linked via dl_next */
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};
     187             : 
     188             : void reqsk_queue_alloc(struct request_sock_queue *queue);
     189             : 
     190             : void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
     191             :                            bool reset);
     192             : 
     193          33 : static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
     194             : {
     195          33 :         return READ_ONCE(queue->rskq_accept_head) == NULL;
     196             : }
     197             : 
     198           5 : static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
     199             :                                                       struct sock *parent)
     200             : {
     201           5 :         struct request_sock *req;
     202             : 
     203           5 :         spin_lock_bh(&queue->rskq_lock);
     204           5 :         req = queue->rskq_accept_head;
     205           5 :         if (req) {
     206           4 :                 sk_acceptq_removed(parent);
     207           4 :                 WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
     208           4 :                 if (queue->rskq_accept_head == NULL)
     209           4 :                         queue->rskq_accept_tail = NULL;
     210             :         }
     211           5 :         spin_unlock_bh(&queue->rskq_lock);
     212           5 :         return req;
     213             : }
     214             : 
     215           4 : static inline void reqsk_queue_removed(struct request_sock_queue *queue,
     216             :                                        const struct request_sock *req)
     217             : {
     218           4 :         if (req->num_timeout == 0)
     219           4 :                 atomic_dec(&queue->young);
     220           4 :         atomic_dec(&queue->qlen);
     221           4 : }
     222             : 
     223           4 : static inline void reqsk_queue_added(struct request_sock_queue *queue)
     224             : {
     225           4 :         atomic_inc(&queue->young);
     226           4 :         atomic_inc(&queue->qlen);
     227           4 : }
     228             : 
     229           4 : static inline int reqsk_queue_len(const struct request_sock_queue *queue)
     230             : {
     231           4 :         return atomic_read(&queue->qlen);
     232             : }
     233             : 
     234           0 : static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
     235             : {
     236           0 :         return atomic_read(&queue->young);
     237             : }
     238             : 
     239             : #endif /* _REQUEST_SOCK_H */

Generated by: LCOV version 1.14