LCOV - code coverage report
Current view: top level - net/ipv4 - tcp_fastopen.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                  Hit    Total    Coverage
Lines:             48      270      17.8 %
Functions:          5       20      25.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/crypto.h>
       3             : #include <linux/err.h>
       4             : #include <linux/init.h>
       5             : #include <linux/kernel.h>
       6             : #include <linux/list.h>
       7             : #include <linux/tcp.h>
       8             : #include <linux/rcupdate.h>
       9             : #include <linux/rculist.h>
      10             : #include <net/inetpeer.h>
      11             : #include <net/tcp.h>
      12             : 
      13           1 : void tcp_fastopen_init_key_once(struct net *net)
      14             : {
      15           1 :         u8 key[TCP_FASTOPEN_KEY_LENGTH];
      16           1 :         struct tcp_fastopen_context *ctxt;
      17             : 
      18           1 :         rcu_read_lock();
      19           1 :         ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
      20           1 :         if (ctxt) {
      21           0 :                 rcu_read_unlock();
      22           0 :                 return;
      23             :         }
      24           1 :         rcu_read_unlock();
      25             : 
      26             :         /* tcp_fastopen_reset_cipher publishes the new context
       27             :          * atomically, so we allow this race to happen here.
      28             :          *
      29             :          * All call sites of tcp_fastopen_cookie_gen also check
      30             :          * for a valid cookie, so this is an acceptable risk.
      31             :          */
      32           1 :         get_random_bytes(key, sizeof(key));
      33           1 :         tcp_fastopen_reset_cipher(net, NULL, key, NULL);
      34             : }
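
The global key installed above can also be set explicitly by an administrator
through the net.ipv4.tcp_fastopen_key sysctl, which feeds the same
tcp_fastopen_reset_cipher() path (sk == NULL). A minimal sketch; the helper
name and the key value are illustrative only:

    #include <stdio.h>

    /* Write a 16-byte key as four dash-separated 32-bit hex words,
     * the format the sysctl expects.
     */
    int set_tfo_server_key(const char *key_hex)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_key", "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", key_hex);  /* e.g. "00112233-44556677-8899aabb-ccddeeff" */
            return fclose(f);
    }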
      35             : 
      36           0 : static void tcp_fastopen_ctx_free(struct rcu_head *head)
      37             : {
      38           0 :         struct tcp_fastopen_context *ctx =
      39           0 :             container_of(head, struct tcp_fastopen_context, rcu);
      40             : 
      41           0 :         kfree_sensitive(ctx);
      42           0 : }
      43             : 
      44           4 : void tcp_fastopen_destroy_cipher(struct sock *sk)
      45             : {
      46           4 :         struct tcp_fastopen_context *ctx;
      47             : 
      48           4 :         ctx = rcu_dereference_protected(
      49             :                         inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
      50           4 :         if (ctx)
      51           0 :                 call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
      52           4 : }
      53             : 
      54           0 : void tcp_fastopen_ctx_destroy(struct net *net)
      55             : {
      56           0 :         struct tcp_fastopen_context *ctxt;
      57             : 
      58           0 :         spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
      59             : 
      60           0 :         ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
      61             :                                 lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
      62           0 :         rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
      63           0 :         spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
      64             : 
      65           0 :         if (ctxt)
      66           0 :                 call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
      67           0 : }
      68             : 
      69           1 : int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
      70             :                               void *primary_key, void *backup_key)
      71             : {
      72           1 :         struct tcp_fastopen_context *ctx, *octx;
      73           1 :         struct fastopen_queue *q;
      74           1 :         int err = 0;
      75             : 
      76           1 :         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
      77           1 :         if (!ctx) {
      78           0 :                 err = -ENOMEM;
      79           0 :                 goto out;
      80             :         }
      81             : 
      82           1 :         ctx->key[0].key[0] = get_unaligned_le64(primary_key);
      83           1 :         ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
      84           1 :         if (backup_key) {
      85           0 :                 ctx->key[1].key[0] = get_unaligned_le64(backup_key);
      86           0 :                 ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
      87           0 :                 ctx->num = 2;
      88             :         } else {
      89           1 :                 ctx->num = 1;
      90             :         }
      91             : 
      92           1 :         spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
      93           1 :         if (sk) {
      94           0 :                 q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
      95           0 :                 octx = rcu_dereference_protected(q->ctx,
      96             :                         lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
      97           0 :                 rcu_assign_pointer(q->ctx, ctx);
      98             :         } else {
      99           1 :                 octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
     100             :                         lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
     101           1 :                 rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
     102             :         }
     103           1 :         spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
     104             : 
     105           1 :         if (octx)
     106           0 :                 call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
     107           1 : out:
     108           1 :         return err;
     109             : }
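
From userspace, the per-socket branch above (sk != NULL) is reached through the
TCP_FASTOPEN_KEY socket option on a listener. A hedged sketch, assuming a libc
that exposes TCP_FASTOPEN_KEY: a 32-byte value installs a primary plus a backup
key, a 16-byte value a primary key only.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    int rotate_tfo_keys(int listen_fd, const unsigned char primary[16],
                        const unsigned char backup[16])
    {
            unsigned char keys[32];

            memcpy(keys, primary, 16);      /* becomes ctx->key[0] above */
            memcpy(keys + 16, backup, 16);  /* becomes ctx->key[1] (backup_key) */
            return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY,
                              keys, sizeof(keys));
    }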
     110             : 
     111           0 : int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
     112             :                             u64 *key)
     113             : {
     114           0 :         struct tcp_fastopen_context *ctx;
     115           0 :         int n_keys = 0, i;
     116             : 
     117           0 :         rcu_read_lock();
     118           0 :         if (icsk)
     119           0 :                 ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
     120             :         else
     121           0 :                 ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
     122           0 :         if (ctx) {
     123           0 :                 n_keys = tcp_fastopen_context_len(ctx);
     124           0 :                 for (i = 0; i < n_keys; i++) {
     125           0 :                         put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
     126           0 :                         put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
     127             :                 }
     128             :         }
     129           0 :         rcu_read_unlock();
     130             : 
     131           0 :         return n_keys;
     132             : }
     133             : 
     134           0 : static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
     135             :                                              struct sk_buff *syn,
     136             :                                              const siphash_key_t *key,
     137             :                                              struct tcp_fastopen_cookie *foc)
     138             : {
     139           0 :         BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));
     140             : 
     141           0 :         if (req->rsk_ops->family == AF_INET) {
     142           0 :                 const struct iphdr *iph = ip_hdr(syn);
     143             : 
     144           0 :                 foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
     145             :                                           sizeof(iph->saddr) +
     146             :                                           sizeof(iph->daddr),
     147             :                                           key));
     148           0 :                 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
     149           0 :                 return true;
     150             :         }
     151             : #if IS_ENABLED(CONFIG_IPV6)
     152             :         if (req->rsk_ops->family == AF_INET6) {
     153             :                 const struct ipv6hdr *ip6h = ipv6_hdr(syn);
     154             : 
     155             :                 foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
     156             :                                           sizeof(ip6h->saddr) +
     157             :                                           sizeof(ip6h->daddr),
     158             :                                           key));
     159             :                 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
     160             :                 return true;
     161             :         }
     162             : #endif
     163             :         return false;
     164             : }
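
For reference, the 64-bit value computed above is what travels in the Fast Open
TCP option (kind 34, RFC 7413) of the SYN-ACK. A sketch of the wire layout for
the fixed 8-byte cookie size enforced by the BUILD_BUG_ON above:

    #include <stdint.h>

    struct tfo_option {
            uint8_t kind;        /* 34: TCP Fast Open (RFC 7413) */
            uint8_t len;         /* 10: 2 option-header bytes + 8 cookie bytes */
            uint8_t cookie[8];   /* le64 SipHash of source and destination address */
    } __attribute__((packed));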
     165             : 
     166             : /* Generate the fastopen cookie by applying SipHash to both the source and
     167             :  * destination addresses.
     168             :  */
     169           0 : static void tcp_fastopen_cookie_gen(struct sock *sk,
     170             :                                     struct request_sock *req,
     171             :                                     struct sk_buff *syn,
     172             :                                     struct tcp_fastopen_cookie *foc)
     173             : {
     174           0 :         struct tcp_fastopen_context *ctx;
     175             : 
     176           0 :         rcu_read_lock();
     177           0 :         ctx = tcp_fastopen_get_ctx(sk);
     178           0 :         if (ctx)
     179           0 :                 __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
     180           0 :         rcu_read_unlock();
     181           0 : }
     182             : 
     183             : /* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
     184             :  * queue this additional data / FIN.
     185             :  */
     186           0 : void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
     187             : {
     188           0 :         struct tcp_sock *tp = tcp_sk(sk);
     189             : 
     190           0 :         if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
     191             :                 return;
     192             : 
     193           0 :         skb = skb_clone(skb, GFP_ATOMIC);
     194           0 :         if (!skb)
     195             :                 return;
     196             : 
     197           0 :         skb_dst_drop(skb);
     198             :         /* segs_in has been initialized to 1 in tcp_create_openreq_child().
     199             :          * Hence, reset segs_in to 0 before calling tcp_segs_in()
     200             :          * to avoid double counting.  Also, tcp_segs_in() expects
     201             :          * skb->len to include the tcp_hdrlen.  Hence, it should
     202             :          * be called before __skb_pull().
     203             :          */
     204           0 :         tp->segs_in = 0;
     205           0 :         tcp_segs_in(tp, skb);
     206           0 :         __skb_pull(skb, tcp_hdrlen(skb));
     207           0 :         sk_forced_mem_schedule(sk, skb->truesize);
     208           0 :         skb_set_owner_r(skb, sk);
     209             : 
     210           0 :         TCP_SKB_CB(skb)->seq++;
     211           0 :         TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
     212             : 
     213           0 :         tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
     214           0 :         __skb_queue_tail(&sk->sk_receive_queue, skb);
     215           0 :         tp->syn_data_acked = 1;
     216             : 
     217             :         /* u64_stats_update_begin(&tp->syncp) not needed here,
      218             :          * as we certainly are not changing the upper 32-bit value (0)
     219             :          */
     220           0 :         tp->bytes_received = skb->len;
     221             : 
     222           0 :         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
     223           0 :                 tcp_fin(sk);
     224             : }
     225             : 
     226             : /* returns 0 - no key match, 1 for primary, 2 for backup */
     227           0 : static int tcp_fastopen_cookie_gen_check(struct sock *sk,
     228             :                                          struct request_sock *req,
     229             :                                          struct sk_buff *syn,
     230             :                                          struct tcp_fastopen_cookie *orig,
     231             :                                          struct tcp_fastopen_cookie *valid_foc)
     232             : {
     233           0 :         struct tcp_fastopen_cookie search_foc = { .len = -1 };
     234           0 :         struct tcp_fastopen_cookie *foc = valid_foc;
     235           0 :         struct tcp_fastopen_context *ctx;
     236           0 :         int i, ret = 0;
     237             : 
     238           0 :         rcu_read_lock();
     239           0 :         ctx = tcp_fastopen_get_ctx(sk);
     240           0 :         if (!ctx)
     241           0 :                 goto out;
     242           0 :         for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
     243           0 :                 __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
     244           0 :                 if (tcp_fastopen_cookie_match(foc, orig)) {
     245           0 :                         ret = i + 1;
     246           0 :                         goto out;
     247             :                 }
     248           0 :                 foc = &search_foc;
     249             :         }
     250           0 : out:
     251           0 :         rcu_read_unlock();
     252           0 :         return ret;
     253             : }
     254             : 
     255           0 : static struct sock *tcp_fastopen_create_child(struct sock *sk,
     256             :                                               struct sk_buff *skb,
     257             :                                               struct request_sock *req)
     258             : {
     259           0 :         struct tcp_sock *tp;
     260           0 :         struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
     261           0 :         struct sock *child;
     262           0 :         bool own_req;
     263             : 
     264           0 :         child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
     265             :                                                          NULL, &own_req);
     266           0 :         if (!child)
     267             :                 return NULL;
     268             : 
     269           0 :         spin_lock(&queue->fastopenq.lock);
     270           0 :         queue->fastopenq.qlen++;
     271           0 :         spin_unlock(&queue->fastopenq.lock);
     272             : 
     273             :         /* Initialize the child socket. Have to fix some values to take
      274             :          * into account that the child is a Fast Open socket and is created
     275             :          * only out of the bits carried in the SYN packet.
     276             :          */
     277           0 :         tp = tcp_sk(child);
     278             : 
     279           0 :         rcu_assign_pointer(tp->fastopen_rsk, req);
     280           0 :         tcp_rsk(req)->tfo_listener = true;
     281             : 
     282             :         /* RFC1323: The window in SYN & SYN/ACK segments is never
     283             :          * scaled. So correct it appropriately.
     284             :          */
     285           0 :         tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
     286           0 :         tp->max_window = tp->snd_wnd;
     287             : 
     288             :         /* Activate the retrans timer so that SYNACK can be retransmitted.
     289             :          * The request socket is not added to the ehash
     290             :          * because it's been added to the accept queue directly.
     291             :          */
     292           0 :         inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
     293             :                                   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
     294             : 
     295           0 :         refcount_set(&req->rsk_refcnt, 2);
     296             : 
     297             :         /* Now finish processing the fastopen child socket. */
     298           0 :         tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
     299             : 
     300           0 :         tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
     301             : 
     302           0 :         tcp_fastopen_add_skb(child, skb);
     303             : 
     304           0 :         tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
     305           0 :         tp->rcv_wup = tp->rcv_nxt;
      306             :         /* tcp_conn_request() sends the SYNACK
      307             :          * and queues the child into the listener's accept queue.
     308             :          */
     309           0 :         return child;
     310             : }
     311             : 
     312           0 : static bool tcp_fastopen_queue_check(struct sock *sk)
     313             : {
     314           0 :         struct fastopen_queue *fastopenq;
     315             : 
     316             :         /* Make sure the listener has enabled fastopen, and we don't
     317             :          * exceed the max # of pending TFO requests allowed before trying
     318             :          * to validating the cookie in order to avoid burning CPU cycles
     319             :          * unnecessarily.
     320             :          *
     321             :          * XXX (TFO) - The implication of checking the max_qlen before
     322             :          * processing a cookie request is that clients can't differentiate
     323             :          * between qlen overflow causing Fast Open to be disabled
     324             :          * temporarily vs a server not supporting Fast Open at all.
     325             :          */
     326           0 :         fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
     327           0 :         if (fastopenq->max_qlen == 0)
     328             :                 return false;
     329             : 
     330           0 :         if (fastopenq->qlen >= fastopenq->max_qlen) {
     331           0 :                 struct request_sock *req1;
     332           0 :                 spin_lock(&fastopenq->lock);
     333           0 :                 req1 = fastopenq->rskq_rst_head;
     334           0 :                 if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
     335           0 :                         __NET_INC_STATS(sock_net(sk),
     336             :                                         LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
     337           0 :                         spin_unlock(&fastopenq->lock);
     338           0 :                         return false;
     339             :                 }
     340           0 :                 fastopenq->rskq_rst_head = req1->dl_next;
     341           0 :                 fastopenq->qlen--;
     342           0 :                 spin_unlock(&fastopenq->lock);
     343           0 :                 reqsk_put(req1);
     344             :         }
     345             :         return true;
     346             : }
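
The max_qlen consulted above is configured when the listener opts in to Fast
Open with the TCP_FASTOPEN socket option (the TFO_SERVER_ENABLE sysctl bit must
also be set, as checked in tcp_try_fastopen() below). A minimal sketch; the
qlen value is an arbitrary example:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int enable_server_tfo(int listen_fd)
    {
            int qlen = 128;   /* cap on pending TFO requests (fastopenq.max_qlen) */

            return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
                              &qlen, sizeof(qlen));
    }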
     347             : 
     348           0 : static bool tcp_fastopen_no_cookie(const struct sock *sk,
     349             :                                    const struct dst_entry *dst,
     350             :                                    int flag)
     351             : {
     352           0 :         return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
     353           0 :                tcp_sk(sk)->fastopen_no_cookie ||
     354           0 :                (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
     355             : }
     356             : 
      357             : /* Returns the child socket if we perform Fast Open on the SYN, else NULL.
      358             :  * The cookie (foc) may be updated and returned to the client in the SYN-ACK
      359             :  * later, e.g. for a Fast Open cookie request (foc->len == 0).
     360             :  */
     361           4 : struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
     362             :                               struct request_sock *req,
     363             :                               struct tcp_fastopen_cookie *foc,
     364             :                               const struct dst_entry *dst)
     365             : {
     366           4 :         bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
     367           4 :         int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
     368           4 :         struct tcp_fastopen_cookie valid_foc = { .len = -1 };
     369           4 :         struct sock *child;
     370           4 :         int ret = 0;
     371             : 
     372           4 :         if (foc->len == 0) /* Client requests a cookie */
     373           4 :                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
     374             : 
     375           4 :         if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
     376           0 :               (syn_data || foc->len >= 0) &&
     377           0 :               tcp_fastopen_queue_check(sk))) {
     378           4 :                 foc->len = -1;
     379           4 :                 return NULL;
     380             :         }
     381             : 
     382           0 :         if (syn_data &&
     383           0 :             tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
     384           0 :                 goto fastopen;
     385             : 
     386           0 :         if (foc->len == 0) {
     387             :                 /* Client requests a cookie. */
     388           0 :                 tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
     389           0 :         } else if (foc->len > 0) {
     390           0 :                 ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
     391             :                                                     &valid_foc);
     392           0 :                 if (!ret) {
     393           0 :                         NET_INC_STATS(sock_net(sk),
     394             :                                       LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
     395             :                 } else {
     396             :                         /* Cookie is valid. Create a (full) child socket to
     397             :                          * accept the data in SYN before returning a SYN-ACK to
     398             :                          * ack the data. If we fail to create the socket, fall
      399             :          * back and ack the ISN only, but include the same
     400             :                          * cookie.
     401             :                          *
     402             :                          * Note: Data-less SYN with valid cookie is allowed to
     403             :                          * send data in SYN_RECV state.
     404             :                          */
     405           0 : fastopen:
     406           0 :                         child = tcp_fastopen_create_child(sk, skb, req);
     407           0 :                         if (child) {
     408           0 :                                 if (ret == 2) {
     409           0 :                                         valid_foc.exp = foc->exp;
     410           0 :                                         *foc = valid_foc;
     411           0 :                                         NET_INC_STATS(sock_net(sk),
     412             :                                                       LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
     413             :                                 } else {
     414           0 :                                         foc->len = -1;
     415             :                                 }
     416           0 :                                 NET_INC_STATS(sock_net(sk),
     417             :                                               LINUX_MIB_TCPFASTOPENPASSIVE);
     418           0 :                                 return child;
     419             :                         }
     420           0 :                         NET_INC_STATS(sock_net(sk),
     421             :                                       LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
     422             :                 }
     423             :         }
     424           0 :         valid_foc.exp = foc->exp;
     425           0 :         *foc = valid_foc;
     426           0 :         return NULL;
     427             : }
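
The syn_data case handled above comes from a client that sends data in the SYN.
A hedged client-side sketch, assuming MSG_FASTOPEN is exposed by the system
headers; address setup is omitted:

    #include <sys/types.h>
    #include <sys/socket.h>

    ssize_t tfo_sendto(int fd, const void *buf, size_t len,
                       const struct sockaddr *srv, socklen_t srvlen)
    {
            /* The first use sends a cookie request (foc->len == 0 above); once
             * a cookie is cached, the data rides in the SYN itself.
             */
            return sendto(fd, buf, len, MSG_FASTOPEN, srv, srvlen);
    }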
     428             : 
     429           0 : bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
     430             :                                struct tcp_fastopen_cookie *cookie)
     431             : {
     432           0 :         const struct dst_entry *dst;
     433             : 
     434           0 :         tcp_fastopen_cache_get(sk, mss, cookie);
     435             : 
     436             :         /* Firewall blackhole issue check */
     437           0 :         if (tcp_fastopen_active_should_disable(sk)) {
     438           0 :                 cookie->len = -1;
     439           0 :                 return false;
     440             :         }
     441             : 
     442           0 :         dst = __sk_dst_get(sk);
     443             : 
     444           0 :         if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
     445           0 :                 cookie->len = -1;
     446           0 :                 return true;
     447             :         }
     448           0 :         if (cookie->len > 0)
     449             :                 return true;
     450           0 :         tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
     451           0 :         return false;
     452             : }
     453             : 
     454             : /* This function checks if we want to defer sending SYN until the first
     455             :  * write().  We defer under the following conditions:
     456             :  * 1. fastopen_connect sockopt is set
     457             :  * 2. we have a valid cookie
      458             :  * Return value: return true if we want to defer until the application writes data
      459             :  *               return false if we want to send out the SYN immediately
     460             :  */
     461           0 : bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
     462             : {
     463           0 :         struct tcp_fastopen_cookie cookie = { .len = 0 };
     464           0 :         struct tcp_sock *tp = tcp_sk(sk);
     465           0 :         u16 mss;
     466             : 
     467           0 :         if (tp->fastopen_connect && !tp->fastopen_req) {
     468           0 :                 if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
     469           0 :                         inet_sk(sk)->defer_connect = 1;
     470           0 :                         return true;
     471             :                 }
     472             : 
     473             :                 /* Alloc fastopen_req in order for FO option to be included
     474             :                  * in SYN
     475             :                  */
     476           0 :                 tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
     477             :                                            sk->sk_allocation);
     478           0 :                 if (tp->fastopen_req)
     479           0 :                         tp->fastopen_req->cookie = cookie;
     480             :                 else
     481           0 :                         *err = -ENOBUFS;
     482             :         }
     483             :         return false;
     484             : }
     485             : EXPORT_SYMBOL(tcp_fastopen_defer_connect);
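
The userspace trigger for this deferral is the TCP_FASTOPEN_CONNECT socket
option. A sketch, assuming the option is exposed by the headers: with it set,
connect() returns without emitting a SYN and the handshake starts on the first
write().

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <unistd.h>

    ssize_t tfo_connect_write(int fd, const struct sockaddr *srv,
                              socklen_t srvlen, const void *buf, size_t len)
    {
            int on = 1;

            if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
                           &on, sizeof(on)) < 0)
                    return -1;
            if (connect(fd, srv, srvlen) < 0)   /* deferred: no SYN sent yet */
                    return -1;
            return write(fd, buf, len);         /* SYN (+data if cookie cached) */
    }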
     486             : 
     487             : /*
      488             :  * The following code block deals with middlebox issues with TFO:
      489             :  * middlebox firewalls can potentially cause the server's data to be
     490             :  * blackholed after a successful 3WHS using TFO.
     491             :  * The proposed solution is to disable active TFO globally under the
     492             :  * following circumstances:
     493             :  *   1. client side TFO socket receives out of order FIN
     494             :  *   2. client side TFO socket receives out of order RST
     495             :  *   3. client side TFO socket has timed out three times consecutively during
     496             :  *      or after handshake
     497             :  * We disable active side TFO globally for 1hr at first. Then if it
     498             :  * happens again, we disable it for 2h, then 4h, 8h, ...
     499             :  * And we reset the timeout back to 1hr when we see a successful active
     500             :  * TFO connection with data exchanges.
     501             :  */
     502             : 
     503             : /* Disable active TFO and record current jiffies and
     504             :  * tfo_active_disable_times
     505             :  */
     506           0 : void tcp_fastopen_active_disable(struct sock *sk)
     507             : {
     508           0 :         struct net *net = sock_net(sk);
     509             : 
     510           0 :         atomic_inc(&net->ipv4.tfo_active_disable_times);
     511           0 :         net->ipv4.tfo_active_disable_stamp = jiffies;
     512           0 :         NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
     513           0 : }
     514             : 
     515             : /* Calculate timeout for tfo active disable
     516             :  * Return true if we are still in the active TFO disable period
     517             :  * Return false if timeout already expired and we should use active TFO
     518             :  */
     519           0 : bool tcp_fastopen_active_should_disable(struct sock *sk)
     520             : {
     521           0 :         unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
     522           0 :         int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
     523           0 :         unsigned long timeout;
     524           0 :         int multiplier;
     525             : 
     526           0 :         if (!tfo_da_times)
     527             :                 return false;
     528             : 
      529             :         /* Limit timeout to max: 2^6 * initial timeout */
     530           0 :         multiplier = 1 << min(tfo_da_times - 1, 6);
     531           0 :         timeout = multiplier * tfo_bh_timeout * HZ;
     532           0 :         if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
     533             :                 return true;
     534             : 
     535             :         /* Mark check bit so we can check for successful active TFO
     536             :          * condition and reset tfo_active_disable_times
     537             :          */
     538           0 :         tcp_sk(sk)->syn_fastopen_ch = 1;
     539           0 :         return false;
     540             : }
     541             : 
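
Worked example of the backoff computed above, rebuilt as a standalone sketch:
with the documented 1h base timeout, consecutive disable events yield 1h, 2h,
4h, ..., saturating at 64h once tfo_da_times reaches 7.

    /* Mirrors: multiplier = 1 << min(tfo_da_times - 1, 6) */
    unsigned long tfo_disable_secs(int tfo_da_times, unsigned long base_secs)
    {
            int shift = tfo_da_times - 1;

            if (shift > 6)
                    shift = 6;
            return (1UL << shift) * base_secs;  /* e.g. 3 events, 3600s -> 4h */
    }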
     542             : /* Disable active TFO if FIN is the only packet in the ofo queue
     543             :  * and no data is received.
     544             :  * Also check if we can reset tfo_active_disable_times if data is
      545             :  * received successfully on marked active TFO sockets opened on
      546             :  * a non-loopback interface.
     547             :  */
     548           4 : void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
     549             : {
     550           4 :         struct tcp_sock *tp = tcp_sk(sk);
     551           4 :         struct dst_entry *dst;
     552           4 :         struct sk_buff *skb;
     553             : 
     554           4 :         if (!tp->syn_fastopen)
     555             :                 return;
     556             : 
     557           0 :         if (!tp->data_segs_in) {
     558           0 :                 skb = skb_rb_first(&tp->out_of_order_queue);
     559           0 :                 if (skb && !skb_rb_next(skb)) {
     560           0 :                         if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
     561           0 :                                 tcp_fastopen_active_disable(sk);
     562           0 :                                 return;
     563             :                         }
     564             :                 }
     565           0 :         } else if (tp->syn_fastopen_ch &&
     566           0 :                    atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
     567           0 :                 dst = sk_dst_get(sk);
     568           0 :                 if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
     569           0 :                         atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
     570           0 :                 dst_release(dst);
     571             :         }
     572             : }
     573             : 
     574           0 : void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
     575             : {
     576           0 :         u32 timeouts = inet_csk(sk)->icsk_retransmits;
     577           0 :         struct tcp_sock *tp = tcp_sk(sk);
     578             : 
      579             :         /* Broken middle-boxes may black-hole Fast Open connections during or
     580             :          * even after the handshake. Be extremely conservative and pause
     581             :          * Fast Open globally after hitting the third consecutive timeout or
     582             :          * exceeding the configured timeout limit.
     583             :          */
     584           0 :         if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
     585           0 :             (timeouts == 2 || (timeouts < 2 && expired))) {
     586           0 :                 tcp_fastopen_active_disable(sk);
     587           0 :                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
     588             :         }
     589           0 : }

Generated by: LCOV version 1.14