LCOV - code coverage report
Current view: top level - net/ipv4 - tcp_rate.c (source / functions)
Test: landlock.info                              Hit    Total    Coverage
Date: 2021-04-22 12:43:58        Lines:           56       63      88.9 %
                                 Functions:        4        4     100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : #include <net/tcp.h>
       3             : 
       4             : /* The bandwidth estimator estimates the rate at which the network
       5             :  * can currently deliver outbound data packets for this flow. At a high
       6             :  * level, it operates by taking a delivery rate sample for each ACK.
       7             :  *
       8             :  * A rate sample records the rate at which the network delivered packets
       9             :  * for this flow, calculated over the time interval between the transmission
      10             :  * of a data packet and the acknowledgment of that packet.
      11             :  *
      12             :  * Specifically, over the interval between each transmit and corresponding ACK,
      13             :  * the estimator generates a delivery rate sample. Typically it uses the rate
      14             :  * at which packets were acknowledged. However, the approach of using only the
       15             :  * acknowledgment rate faces a challenge under prevalent ACK decimation or
       16             :  * compression: packets can temporarily appear to be delivered much faster
      17             :  * than the bottleneck rate. Since it is physically impossible to do that in a
      18             :  * sustained fashion, when the estimator notices that the ACK rate is faster
      19             :  * than the transmit rate, it uses the latter:
      20             :  *
      21             :  *    send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
      22             :  *    ack_rate  = #pkts_delivered/(last_ack_time - first_ack_time)
      23             :  *    bw = min(send_rate, ack_rate)
      24             :  *
       25             :  * Note that the estimator essentially estimates the goodput, not necessarily
       26             :  * the network bottleneck link rate, when sending or receiving is limited by
       27             :  * other factors like applications or receiver window limits.  The estimator
      28             :  * deliberately avoids using the inter-packet spacing approach because that
      29             :  * approach requires a large number of samples and sophisticated filtering.
      30             :  *
      31             :  * TCP flows can often be application-limited in request/response workloads.
      32             :  * The estimator marks a bandwidth sample as application-limited if there
      33             :  * was some moment during the sampled window of packets when there was no data
      34             :  * ready to send in the write queue.
      35             :  */
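
To make the min() rule above concrete, here is a short worked example. It is a
standalone userspace C sketch, not kernel code, and all of the numbers are
hypothetical (chosen to mimic ACK compression):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical window: 10 packets sent over 10 ms, whose ACKs
             * all arrived within 4 ms because the receiver compressed them. */
            double pkts_delivered = 10.0;
            double snd_interval   = 0.010;  /* last_snd_time - first_snd_time, s */
            double ack_interval   = 0.004;  /* last_ack_time - first_ack_time, s */

            double send_rate = pkts_delivered / snd_interval;  /* 1000 pkt/s */
            double ack_rate  = pkts_delivered / ack_interval;  /* 2500 pkt/s */

            /* 2500 pkt/s cannot be sustained by the path, so the estimator
             * falls back to the send rate: */
            double bw = send_rate < ack_rate ? send_rate : ack_rate;
            printf("bw = %.0f pkt/s\n", bw);  /* prints 1000 */
            return 0;
    }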
      36             : 
      37             : /* Snapshot the current delivery information in the skb, to generate
      38             :  * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
      39             :  */
      40         364 : void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
      41             : {
      42         364 :         struct tcp_sock *tp = tcp_sk(sk);
      43             : 
      44             :          /* In general we need to start delivery rate samples from the
      45             :           * time we received the most recent ACK, to ensure we include
      46             :           * the full time the network needs to deliver all in-flight
      47             :           * packets. If there are no packets in flight yet, then we
      48             :           * know that any ACKs after now indicate that the network was
      49             :           * able to deliver those packets completely in the sampling
      50             :           * interval between now and the next ACK.
      51             :           *
      52             :           * Note that we use packets_out instead of tcp_packets_in_flight(tp)
      53             :           * because the latter is a guess based on RTO and loss-marking
      54             :           * heuristics. We don't want spurious RTOs or loss markings to cause
      55             :           * a spuriously small time interval, causing a spuriously high
      56             :           * bandwidth estimate.
      57             :           */
      58         364 :         if (!tp->packets_out) {
      59         234 :                 u64 tstamp_us = tcp_skb_timestamp_us(skb);
      60             : 
      61         234 :                 tp->first_tx_mstamp  = tstamp_us;
      62         234 :                 tp->delivered_mstamp = tstamp_us;
      63             :         }
      64             : 
      65         364 :         TCP_SKB_CB(skb)->tx.first_tx_mstamp  = tp->first_tx_mstamp;
      66         364 :         TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
       67         364 :         TCP_SKB_CB(skb)->tx.delivered        = tp->delivered;
      68         364 :         TCP_SKB_CB(skb)->tx.is_app_limited   = tp->app_limited ? 1 : 0;
      69         364 : }
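
A minimal userspace sketch of the idle-restart rule in tcp_rate_skb_sent()
(the struct and timestamps are simplified stand-ins for the kernel's; the
field names mirror tcp_sock but everything here is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    struct flow {
            uint64_t first_tx_mstamp;   /* start of the current sample window */
            uint64_t delivered_mstamp;
            uint32_t packets_out;
    };

    static void on_send(struct flow *f, uint64_t now_us)
    {
            if (!f->packets_out) {
                    /* Nothing in flight: restart the window at this send,
                     * so idle time is never counted in a rate interval. */
                    f->first_tx_mstamp  = now_us;
                    f->delivered_mstamp = now_us;
            }
            f->packets_out++;
    }

    int main(void)
    {
            struct flow f = { 0, 0, 0 };

            on_send(&f, 1000);      /* window starts at t = 1000 us */
            on_send(&f, 2000);      /* packets in flight: start unchanged */
            f.packets_out = 0;      /* all ACKed; the flow goes idle */
            on_send(&f, 900000);    /* window restarts after the idle gap */
            printf("window start = %llu us\n",
                   (unsigned long long)f.first_tx_mstamp);  /* 900000 */
            return 0;
    }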
      70             : 
      71             : /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
      72             :  * delivery information when the skb was last transmitted.
      73             :  *
      74             :  * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
      75             :  * called multiple times. We favor the information from the most recently
      76             :  * sent skb, i.e., the skb with the highest prior_delivered count.
      77             :  */
      78         364 : void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
      79             :                             struct rate_sample *rs)
      80             : {
      81         364 :         struct tcp_sock *tp = tcp_sk(sk);
      82         364 :         struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
      83             : 
      84         364 :         if (!scb->tx.delivered_mstamp)
      85             :                 return;
      86             : 
      87         364 :         if (!rs->prior_delivered ||
      88          13 :             after(scb->tx.delivered, rs->prior_delivered)) {
      89         352 :                 rs->prior_delivered  = scb->tx.delivered;
      90         352 :                 rs->prior_mstamp     = scb->tx.delivered_mstamp;
      91         352 :                 rs->is_app_limited   = scb->tx.is_app_limited;
       92         352 :                 rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
      93             : 
      94             :                 /* Record send time of most recently ACKed packet: */
      95         352 :                 tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
      96             :                 /* Find the duration of the "send phase" of this window: */
      97         352 :                 rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
      98             :                                                      scb->tx.first_tx_mstamp);
      99             : 
     100             :         }
     101             :         /* Mark off the skb delivered once it's sacked to avoid being
     102             :          * used again when it's cumulatively acked. For acked packets
     103             :          * we don't need to reset since it'll be freed soon.
     104             :          */
     105         364 :         if (scb->sacked & TCPCB_SACKED_ACKED)
     106           0 :                 scb->tx.delivered_mstamp = 0;
     107             : }
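
The "favor the most recently sent skb" rule above can be seen in a small
standalone model (the snapshot values are hypothetical, and after() here is a
reimplementation of TCP's wrap-safe comparison, not the kernel macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "a is newer than b", in the spirit of TCP's after(). */
    static int after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            /* One stretched ACK covers three skbs; each carries the delivery
             * state snapshotted when it was sent (count, timestamp in us). */
            struct { uint32_t delivered; uint64_t mstamp; } skb[] = {
                    { 10, 1000 }, { 11, 1400 }, { 12, 2000 },
            };
            uint32_t prior_delivered = 0;
            uint64_t prior_mstamp = 0;

            for (int i = 0; i < 3; i++) {
                    if (!prior_delivered ||
                        after(skb[i].delivered, prior_delivered)) {
                            prior_delivered = skb[i].delivered;
                            prior_mstamp    = skb[i].mstamp;
                    }
            }
            /* The sample keeps the newest snapshot: delivered=12 @ 2000 us. */
            printf("prior_delivered=%u prior_mstamp=%llu\n",
                   prior_delivered, (unsigned long long)prior_mstamp);
            return 0;
    }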
     108             : 
     109             : /* Update the connection delivery information and generate a rate sample. */
     110         351 : void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
     111             :                   bool is_sack_reneg, struct rate_sample *rs)
     112             : {
     113         351 :         struct tcp_sock *tp = tcp_sk(sk);
     114         351 :         u32 snd_us, ack_us;
     115             : 
     116             :         /* Clear app limited if bubble is acked and gone. */
     117         351 :         if (tp->app_limited && after(tp->delivered, tp->app_limited))
     118         222 :                 tp->app_limited = 0;
     119             : 
     120             :         /* TODO: there are multiple places throughout tcp_ack() to get
     121             :          * current time. Refactor the code using a new "tcp_acktag_state"
     122             :          * to carry current time, flags, stats like "tcp_sacktag_state".
     123             :          */
     124         351 :         if (delivered)
     125         351 :                 tp->delivered_mstamp = tp->tcp_mstamp;
     126             : 
     127         351 :         rs->acked_sacked = delivered;        /* freshly ACKed or SACKed */
     128         351 :         rs->losses = lost;           /* freshly marked lost */
     129             :         /* Return an invalid sample if no timing information is available or
     130             :          * in recovery from loss with SACK reneging. Rate samples taken during
     131             :          * a SACK reneging event may overestimate bw by including packets that
     132             :          * were SACKed before the reneg.
     133             :          */
     134         351 :         if (!rs->prior_mstamp || is_sack_reneg) {
     135           0 :                 rs->delivered = -1;
     136           0 :                 rs->interval_us = -1;
     137           0 :                 return;
     138             :         }
     139         351 :         rs->delivered   = tp->delivered - rs->prior_delivered;
     140             : 
     141             :         /* Model sending data and receiving ACKs as separate pipeline phases
     142             :          * for a window. Usually the ACK phase is longer, but with ACK
     143             :          * compression the send phase can be longer. To be safe we use the
     144             :          * longer phase.
     145             :          */
     146         351 :         snd_us = rs->interval_us;                            /* send phase */
     147         351 :         ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
     148             :                                     rs->prior_mstamp); /* ack phase */
     149         351 :         rs->interval_us = max(snd_us, ack_us);
     150             : 
     151             :         /* Record both segment send and ack receive intervals */
     152         351 :         rs->snd_interval_us = snd_us;
     153         351 :         rs->rcv_interval_us = ack_us;
     154             : 
     155             :         /* Normally we expect interval_us >= min-rtt.
     156             :          * Note that rate may still be over-estimated when a spuriously
      157             :          * retransmitted skb was first (s)acked because "interval_us"
      158             :          * is under-estimated (up to an RTT). However, continuously
      159             :          * measuring the delivery rate during loss recovery is crucial
      160             :          * for connections that suffer heavy or prolonged losses.
     161             :          */
     162         351 :         if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
     163           0 :                 if (!rs->is_retrans)
     164             :                         pr_debug("tcp rate: %ld %d %u %u %u\n",
     165             :                                  rs->interval_us, rs->delivered,
     166             :                                  inet_csk(sk)->icsk_ca_state,
     167             :                                  tp->rx_opt.sack_ok, tcp_min_rtt(tp));
     168           0 :                 rs->interval_us = -1;
     169           0 :                 return;
     170             :         }
     171             : 
     172             :         /* Record the last non-app-limited or the highest app-limited bw */
     173         351 :         if (!rs->is_app_limited ||
     174         321 :             ((u64)rs->delivered * tp->rate_interval_us >=
     175         321 :              (u64)tp->rate_delivered * rs->interval_us)) {
     176          99 :                 tp->rate_delivered = rs->delivered;
     177          99 :                 tp->rate_interval_us = rs->interval_us;
     178          99 :                 tp->rate_app_limited = rs->is_app_limited;
     179             :         }
     180             : }
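
To see how a consumer turns this sample into a bandwidth number, here is a
userspace sketch in the spirit of what a congestion control module such as
BBR does with rs->delivered and the two phase intervals (all values are
hypothetical, including the assumed MSS):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t delivered = 50;      /* rs->delivered for this sample */
            long snd_us = 18000;         /* rs->snd_interval_us: send phase */
            long ack_us = 20000;         /* rs->rcv_interval_us: ack phase */
            uint32_t mss = 1448;         /* assumed payload bytes per packet */

            /* Use the longer phase, as tcp_rate_gen() does, to avoid
             * over-estimating under ACK compression. */
            long interval_us = snd_us > ack_us ? snd_us : ack_us;
            if (interval_us <= 0) {      /* tcp_rate_gen() marks bad samples -1 */
                    puts("sample rejected");
                    return 0;
            }
            uint64_t bw_Bps = (uint64_t)delivered * mss * 1000000ULL
                              / interval_us;
            printf("bw ~= %llu B/s (~%.1f Mbit/s)\n",
                   (unsigned long long)bw_Bps, (double)bw_Bps * 8 / 1e6);
            return 0;
    }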
     181             : 
     182             : /* If a gap is detected between sends, mark the socket application-limited. */
     183         411 : void tcp_rate_check_app_limited(struct sock *sk)
     184             : {
     185         411 :         struct tcp_sock *tp = tcp_sk(sk);
     186             : 
     187         411 :         if (/* We have less than one packet to send. */
     188         411 :             tp->write_seq - tp->snd_nxt < tp->mss_cache &&
     189             :             /* Nothing in sending host's qdisc queues or NIC tx queue. */
     190         411 :             sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
     191             :             /* We are not limited by CWND. */
     192         271 :             tcp_packets_in_flight(tp) < tp->snd_cwnd &&
     193             :             /* All lost packets have been retransmitted. */
     194             :             tp->lost_out <= tp->retrans_out)
     195         271 :                 tp->app_limited =
     196         271 :                         (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
     197         411 : }
     198             : EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited);
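
The "?: 1" at the end of tcp_rate_check_app_limited() is a GCC extension
(x ? : y yields x unless x is 0): tp->app_limited must stay non-zero while the
flow is marked app-limited, because 0 means "not app-limited". A standalone
sketch of how the marker is set and later cleared by tcp_rate_gen()
(hypothetical counts):

    #include <stdint.h>
    #include <stdio.h>

    static int after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            /* Hypothetical state when the app runs out of data to send:
             * 100 packets delivered so far, 5 still in flight. */
            uint32_t delivered = 100, in_flight = 5;

            /* Mirrors (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
             * record where the in-flight "bubble" ends, forcing 1 if the
             * sum would be 0. */
            uint32_t sum = delivered + in_flight;
            uint32_t app_limited = sum ? sum : 1;

            /* tcp_rate_gen() clears the marker once delivery passes the
             * bubble, i.e. once after(delivered, app_limited) holds. */
            while (!after(delivered, app_limited))
                    delivered++;
            printf("app_limited=%u, cleared at delivered=%u\n",
                   app_limited, delivered);  /* 105, 106 */
            return 0;
    }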

Generated by: LCOV version 1.14