LCOV - code coverage report
Current view: top level - net/ipv4 - tcp_offload.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                     Hit     Total    Coverage
        Lines:        79       188      42.0 %
        Functions:     5         8      62.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  *      IPV4 GSO/GRO offload support
       4             :  *      Linux INET implementation
       5             :  *
       6             :  *      TCPv4 GSO/GRO support
       7             :  */
       8             : 
       9             : #include <linux/indirect_call_wrapper.h>
      10             : #include <linux/skbuff.h>
      11             : #include <net/tcp.h>
      12             : #include <net/protocol.h>
      13             : 
      14           0 : static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
      15             :                            unsigned int seq, unsigned int mss)
      16             : {
      17           0 :         while (skb) {
      18           0 :                 if (before(ts_seq, seq + mss)) {
      19           0 :                         skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
      20           0 :                         skb_shinfo(skb)->tskey = ts_seq;
      21           0 :                         return;
      22             :                 }
      23             : 
      24           0 :                 skb = skb->next;
      25           0 :                 seq += mss;
      26             :         }
      27             : }
      28             : 
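The loop above walks the freshly built segment chain and tags the one segment whose sequence range [seq, seq + mss) covers the byte that requested a software timestamp; later segments are left untouched. Below is a minimal userspace sketch of the same walk, with a hypothetical seg struct standing in for sk_buff and before() reproduced from net/tcp.h so it compiles standalone.

#include <stdint.h>
#include <stdio.h>

struct seg {                          /* hypothetical stand-in for sk_buff */
        struct seg *next;
        int has_tstamp;
        uint32_t tskey;
};

static int before(uint32_t seq1, uint32_t seq2)      /* as in net/tcp.h */
{
        return (int32_t)(seq1 - seq2) < 0;
}

static void tag_tstamp_seg(struct seg *s, uint32_t ts_seq,
                           uint32_t seq, uint32_t mss)
{
        while (s) {
                if (before(ts_seq, seq + mss)) {
                        s->has_tstamp = 1;           /* SKBTX_SW_TSTAMP */
                        s->tskey = ts_seq;
                        return;
                }
                s = s->next;
                seq += mss;
        }
}

int main(void)
{
        struct seg c = { 0 }, b = { &c }, a = { &b };

        /* ts_seq 3000 lands in the second segment's range [2460, 3920) */
        tag_tstamp_seg(&a, 3000, 1000, 1460);
        printf("a=%d b=%d c=%d\n", a.has_tstamp, b.has_tstamp, c.has_tstamp);
        return 0;
}
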
      29           0 : static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
      30             :                                         netdev_features_t features)
      31             : {
      32           0 :         if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
      33           0 :                 return ERR_PTR(-EINVAL);
      34             : 
      35           0 :         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
      36           0 :                 return ERR_PTR(-EINVAL);
      37             : 
      38           0 :         if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
      39           0 :                 const struct iphdr *iph = ip_hdr(skb);
      40           0 :                 struct tcphdr *th = tcp_hdr(skb);
      41             : 
       42             :         /* Set up the checksum pseudo-header; we usually expect the
       43             :          * stack to have done this already.
       44             :          */
      45             : 
      46           0 :                 th->check = 0;
      47           0 :                 skb->ip_summed = CHECKSUM_PARTIAL;
      48           0 :                 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
      49             :         }
      50             : 
      51           0 :         return tcp_gso_segment(skb, features);
      52             : }
      53             : 
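For CHECKSUM_PARTIAL, __tcp_v4_send_check() above seeds th->check with the folded but not complemented sum of the TCP pseudo-header; the device, or the GSO path further down, later folds the header and payload into that seed. A standalone sketch of the seed computation, taking addresses and length in host byte order for simplicity (the kernel operates on the network-order fields directly):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits and complement,
 * like the kernel's csum_fold(). */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Pseudo-header seed: folded, non-complemented sum of saddr, daddr,
 * protocol and TCP length, i.e. what ends up in th->check for
 * CHECKSUM_PARTIAL. */
static uint16_t tcp_pseudo_seed(uint32_t saddr, uint32_t daddr, uint16_t len)
{
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += 6;               /* IPPROTO_TCP */
        sum += len;             /* TCP header + payload length */
        return (uint16_t)~csum_fold(sum);
}

int main(void)
{
        /* 192.0.2.1 -> 192.0.2.2, 40-byte TCP segment: prints 0x8432 */
        printf("0x%04x\n", (unsigned)tcp_pseudo_seed(0xc0000201,
                                                     0xc0000202, 40));
        return 0;
}

This matches the ~tcp_v4_check(..., 0) form used by tcp4_gro_complete() near the end of this file.
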
      54           0 : struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
      55             :                                 netdev_features_t features)
      56             : {
      57           0 :         struct sk_buff *segs = ERR_PTR(-EINVAL);
      58           0 :         unsigned int sum_truesize = 0;
      59           0 :         struct tcphdr *th;
      60           0 :         unsigned int thlen;
      61           0 :         unsigned int seq;
      62           0 :         __be32 delta;
      63           0 :         unsigned int oldlen;
      64           0 :         unsigned int mss;
      65           0 :         struct sk_buff *gso_skb = skb;
      66           0 :         __sum16 newcheck;
      67           0 :         bool ooo_okay, copy_destructor;
      68             : 
      69           0 :         th = tcp_hdr(skb);
      70           0 :         thlen = th->doff * 4;
      71           0 :         if (thlen < sizeof(*th))
      72           0 :                 goto out;
      73             : 
      74           0 :         if (!pskb_may_pull(skb, thlen))
      75           0 :                 goto out;
      76             : 
      77           0 :         oldlen = (u16)~skb->len;
      78           0 :         __skb_pull(skb, thlen);
      79             : 
      80           0 :         mss = skb_shinfo(skb)->gso_size;
      81           0 :         if (unlikely(skb->len <= mss))
      82           0 :                 goto out;
      83             : 
      84           0 :         if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
       85             :                 /* Packet is from an untrusted source; reset gso_segs. */
      86             : 
      87           0 :                 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
      88             : 
      89           0 :                 segs = NULL;
      90           0 :                 goto out;
      91             :         }
      92             : 
      93           0 :         copy_destructor = gso_skb->destructor == tcp_wfree;
      94           0 :         ooo_okay = gso_skb->ooo_okay;
      95             :         /* All segments but the first should have ooo_okay cleared */
      96           0 :         skb->ooo_okay = 0;
      97             : 
      98           0 :         segs = skb_segment(skb, features);
      99           0 :         if (IS_ERR(segs))
     100           0 :                 goto out;
     101             : 
      102             :         /* Only the first segment might have ooo_okay set */
     103           0 :         segs->ooo_okay = ooo_okay;
     104             : 
     105             :         /* GSO partial and frag_list segmentation only requires splitting
     106             :          * the frame into an MSS multiple and possibly a remainder, both
     107             :          * cases return a GSO skb. So update the mss now.
     108             :          */
     109           0 :         if (skb_is_gso(segs))
     110           0 :                 mss *= skb_shinfo(segs)->gso_segs;
     111             : 
     112           0 :         delta = htonl(oldlen + (thlen + mss));
     113             : 
     114           0 :         skb = segs;
     115           0 :         th = tcp_hdr(skb);
     116           0 :         seq = ntohl(th->seq);
     117             : 
     118           0 :         if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
     119           0 :                 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
     120             : 
     121           0 :         newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
     122             :                                                (__force u32)delta));
     123             : 
     124           0 :         while (skb->next) {
     125           0 :                 th->fin = th->psh = 0;
     126           0 :                 th->check = newcheck;
     127             : 
     128           0 :                 if (skb->ip_summed == CHECKSUM_PARTIAL)
     129           0 :                         gso_reset_checksum(skb, ~th->check);
     130             :                 else
     131           0 :                         th->check = gso_make_checksum(skb, ~th->check);
     132             : 
     133           0 :                 seq += mss;
     134           0 :                 if (copy_destructor) {
     135           0 :                         skb->destructor = gso_skb->destructor;
     136           0 :                         skb->sk = gso_skb->sk;
     137           0 :                         sum_truesize += skb->truesize;
     138             :                 }
     139           0 :                 skb = skb->next;
     140           0 :                 th = tcp_hdr(skb);
     141             : 
     142           0 :                 th->seq = htonl(seq);
     143           0 :                 th->cwr = 0;
     144             :         }
     145             : 
      146             :         /* The following permits TCP Small Queues to work well with GSO:
      147             :          * the callback to the TCP stack is invoked when the last frag
      148             :          * is freed at TX completion, not right now when gso_skb
      149             :          * is freed by the GSO engine.
      150             :          */
     151           0 :         if (copy_destructor) {
     152           0 :                 int delta;
     153             : 
     154           0 :                 swap(gso_skb->sk, skb->sk);
     155           0 :                 swap(gso_skb->destructor, skb->destructor);
     156           0 :                 sum_truesize += skb->truesize;
     157           0 :                 delta = sum_truesize - gso_skb->truesize;
     158             :                 /* In some pathological cases, delta can be negative.
      159             :          * We need to use either refcount_add() or refcount_sub_and_test().
     160             :                  */
     161           0 :                 if (likely(delta >= 0))
     162           0 :                         refcount_add(delta, &skb->sk->sk_wmem_alloc);
     163             :                 else
     164           0 :                         WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
     165             :         }
     166             : 
     167           0 :         delta = htonl(oldlen + (skb_tail_pointer(skb) -
     168             :                                 skb_transport_header(skb)) +
     169             :                       skb->data_len);
     170           0 :         th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
     171             :                                 (__force u32)delta));
     172           0 :         if (skb->ip_summed == CHECKSUM_PARTIAL)
     173           0 :                 gso_reset_checksum(skb, ~th->check);
     174             :         else
     175           0 :                 th->check = gso_make_checksum(skb, ~th->check);
     176           0 : out:
     177           0 :         return segs;
     178             : }
     179             : 
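The oldlen/delta/newcheck arithmetic in tcp_gso_segment() is the classic incremental checksum update (RFC 1624, eqn. 3): when only the TCP length in the pseudo-header changes, th->check can be patched without re-summing the payload. oldlen is (u16)~skb->len, i.e. the ~old_len term precomputed before segmentation, and thlen + mss is the new per-segment length. A self-contained sketch of the identity, verified against a from-scratch recomputation over a stand-in payload word:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* RFC 1624 eqn. 3: new = ~(~old_check + ~old_len + new_len), all in
 * 16-bit ones'-complement arithmetic; csum_fold() supplies the final
 * fold-and-complement. */
static uint16_t csum_update_len(uint16_t old_check,
                                uint16_t old_len, uint16_t new_len)
{
        uint32_t sum = (uint16_t)~old_check;

        sum += (uint16_t)~old_len;      /* "oldlen" above, (u16)~skb->len */
        sum += new_len;                 /* thlen + mss per segment        */
        return csum_fold(sum);
}

int main(void)
{
        const uint16_t word = 0x1234;   /* stand-in payload/pseudo content */
        uint16_t old_check = csum_fold(100 + word);  /* checksum at len 100 */
        uint16_t new_check = csum_fold(60 + word);   /* checksum at len 60  */

        /* The patched value must match the from-scratch recomputation:
         * prints "patched=0xed8f direct=0xed8f". */
        printf("patched=0x%04x direct=0x%04x\n",
               (unsigned)csum_update_len(old_check, 100, 60),
               (unsigned)new_check);
        return 0;
}

The final segment gets the same treatment with its exact tail length (skb_tail_pointer - skb_transport_header + data_len) in place of thlen + mss.
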
     180         705 : struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
     181             : {
     182         705 :         struct sk_buff *pp = NULL;
     183         705 :         struct sk_buff *p;
     184         705 :         struct tcphdr *th;
     185         705 :         struct tcphdr *th2;
     186         705 :         unsigned int len;
     187         705 :         unsigned int thlen;
     188         705 :         __be32 flags;
     189         705 :         unsigned int mss = 1;
     190         705 :         unsigned int hlen;
     191         705 :         unsigned int off;
     192         705 :         int flush = 1;
     193         705 :         int i;
     194             : 
     195         705 :         off = skb_gro_offset(skb);
     196         705 :         hlen = off + sizeof(*th);
     197         705 :         th = skb_gro_header_fast(skb, off);
     198         705 :         if (skb_gro_header_hard(skb, hlen)) {
     199         705 :                 th = skb_gro_header_slow(skb, hlen, off);
     200         705 :                 if (unlikely(!th))
     201           0 :                         goto out;
     202             :         }
     203             : 
     204         705 :         thlen = th->doff * 4;
     205         705 :         if (thlen < sizeof(*th))
     206           0 :                 goto out;
     207             : 
     208         705 :         hlen = off + thlen;
     209         705 :         if (skb_gro_header_hard(skb, hlen)) {
     210         705 :                 th = skb_gro_header_slow(skb, hlen, off);
     211         705 :                 if (unlikely(!th))
     212           0 :                         goto out;
     213             :         }
     214             : 
     215         705 :         skb_gro_pull(skb, thlen);
     216             : 
     217         705 :         len = skb_gro_len(skb);
     218         705 :         flags = tcp_flag_word(th);
     219             : 
     220         705 :         list_for_each_entry(p, head, list) {
     221         269 :                 if (!NAPI_GRO_CB(p)->same_flow)
     222           0 :                         continue;
     223             : 
     224         269 :                 th2 = tcp_hdr(p);
     225             : 
     226         269 :                 if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
     227           0 :                         NAPI_GRO_CB(p)->same_flow = 0;
     228           0 :                         continue;
     229             :                 }
     230             : 
     231         269 :                 goto found;
     232             :         }
     233         436 :         p = NULL;
     234         436 :         goto out_check_final;
     235             : 
     236         269 : found:
      237             :         /* Include the IP ID check below from the innermost IP hdr */
     238         269 :         flush = NAPI_GRO_CB(p)->flush;
     239         269 :         flush |= (__force int)(flags & TCP_FLAG_CWR);
     240         269 :         flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
     241             :                   ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
     242         269 :         flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
     243         269 :         for (i = sizeof(*th); i < thlen; i += 4)
     244           0 :                 flush |= *(u32 *)((u8 *)th + i) ^
     245           0 :                          *(u32 *)((u8 *)th2 + i);
     246             : 
      247             :         /* When we receive our second frame we can make a decision on whether we
     248             :          * continue this flow as an atomic flow with a fixed ID or if we use
     249             :          * an incrementing ID.
     250             :          */
     251         269 :         if (NAPI_GRO_CB(p)->flush_id != 1 ||
     252           0 :             NAPI_GRO_CB(p)->count != 1 ||
     253           0 :             !NAPI_GRO_CB(p)->is_atomic)
     254         269 :                 flush |= NAPI_GRO_CB(p)->flush_id;
     255             :         else
     256           0 :                 NAPI_GRO_CB(p)->is_atomic = false;
     257             : 
     258         269 :         mss = skb_shinfo(p)->gso_size;
     259             : 
     260         269 :         flush |= (len - 1) >= mss;
     261         269 :         flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
     262             : #ifdef CONFIG_TLS_DEVICE
     263             :         flush |= p->decrypted ^ skb->decrypted;
     264             : #endif
     265             : 
     266         269 :         if (flush || skb_gro_receive(p, skb)) {
     267           2 :                 mss = 1;
     268           2 :                 goto out_check_final;
     269             :         }
     270             : 
     271         267 :         tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
     272             : 
     273         705 : out_check_final:
     274         705 :         flush = len < mss;
     275         705 :         flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
     276             :                                         TCP_FLAG_RST | TCP_FLAG_SYN |
     277             :                                         TCP_FLAG_FIN));
     278             : 
     279         705 :         if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
     280           5 :                 pp = p;
     281             : 
     282         700 : out:
     283         705 :         NAPI_GRO_CB(skb)->flush |= (flush != 0);
     284             : 
     285         705 :         return pp;
     286             : }
     287             : 
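One detail worth calling out from tcp_gro_receive(): because source and dest are adjacent __be16 fields at the start of struct tcphdr, the flow match *(u32 *)&th->source ^ *(u32 *)&th2->source compares both ports with a single 32-bit XOR. A standalone sketch, with a trimmed header struct assumed to match that layout and memcpy() replacing the cast to stay strict-aliasing clean:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct portpair {               /* assumed layout: start of struct tcphdr */
        uint16_t source;
        uint16_t dest;
};

static int same_port_pair(const struct portpair *a, const struct portpair *b)
{
        uint32_t wa, wb;

        /* memcpy() instead of the kernel's (u32 *) cast, to stay
         * strict-aliasing clean in portable C. */
        memcpy(&wa, a, sizeof(wa));
        memcpy(&wb, b, sizeof(wb));
        return (wa ^ wb) == 0;          /* one XOR checks both ports */
}

int main(void)
{
        struct portpair a = { 80, 40000 };
        struct portpair b = { 80, 40000 };
        struct portpair c = { 80, 40001 };

        printf("%d %d\n", same_port_pair(&a, &b), same_port_pair(&a, &c));
        return 0;                       /* prints "1 0" */
}

The size checks serve the same gatekeeping purpose: (len - 1) >= mss flushes any packet carrying more than one MSS of payload, len < mss at out_check_final closes the flow after a sub-MSS packet, and the seq comparison requires segments to arrive exactly in order.
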
     288          15 : int tcp_gro_complete(struct sk_buff *skb)
     289             : {
     290          15 :         struct tcphdr *th = tcp_hdr(skb);
     291             : 
     292          15 :         skb->csum_start = (unsigned char *)th - skb->head;
     293          15 :         skb->csum_offset = offsetof(struct tcphdr, check);
     294          15 :         skb->ip_summed = CHECKSUM_PARTIAL;
     295             : 
     296          15 :         skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
     297             : 
     298          15 :         if (th->cwr)
     299           0 :                 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
     300             : 
     301          15 :         return 0;
     302             : }
     303             : EXPORT_SYMBOL(tcp_gro_complete);
     304             : 
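tcp_gro_complete() hands the merged skb back in CHECKSUM_PARTIAL state: csum_start marks where summing begins and csum_offset where the 16-bit result is stored, with the pseudo-header seed already sitting in the check field (installed by tcp4_gro_complete() below). A simplified flat-buffer sketch of what a consumer, hardware or the software fallback skb_checksum_help(), does with those two fields:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum over [p, p + len), folded and complemented. */
static uint16_t csum(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)p[i] << 8 | p[i + 1];
        if (len & 1)
                sum += (uint32_t)p[len - 1] << 8;
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Sum from csum_start to the end (the check field already holds the
 * pseudo-header seed and is summed along with everything else), then
 * store the result csum_offset bytes into the summed region. */
static void checksum_help(uint8_t *pkt, size_t len,
                          size_t csum_start, size_t csum_offset)
{
        uint16_t c = csum(pkt + csum_start, len - csum_start);

        pkt[csum_start + csum_offset] = c >> 8;
        pkt[csum_start + csum_offset + 1] = c & 0xff;
}

int main(void)
{
        uint8_t pkt[20] = { 0 };        /* fake TCP header, seed = 0 */

        /* check lives at offset 16 in struct tcphdr */
        checksum_help(pkt, sizeof(pkt), 0, 16);
        printf("check = %02x%02x\n", pkt[16], pkt[17]);   /* ffff */
        return 0;
}
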
     305             : INDIRECT_CALLABLE_SCOPE
     306         705 : struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
     307             : {
     308             :         /* Don't bother verifying checksum if we're going to flush anyway. */
     309         705 :         if (!NAPI_GRO_CB(skb)->flush &&
     310         705 :             skb_gro_checksum_validate(skb, IPPROTO_TCP,
     311             :                                       inet_gro_compute_pseudo)) {
     312           0 :                 NAPI_GRO_CB(skb)->flush = 1;
     313           0 :                 return NULL;
     314             :         }
     315             : 
     316         705 :         return tcp_gro_receive(head, skb);
     317             : }
     318             : 
     319          15 : INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
     320             : {
     321          15 :         const struct iphdr *iph = ip_hdr(skb);
     322          15 :         struct tcphdr *th = tcp_hdr(skb);
     323             : 
     324          15 :         th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
     325             :                                   iph->daddr, 0);
     326          15 :         skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
     327             : 
     328          15 :         if (NAPI_GRO_CB(skb)->is_atomic)
     329           0 :                 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
     330             : 
     331          15 :         return tcp_gro_complete(skb);
     332             : }
     333             : 
     334             : static const struct net_offload tcpv4_offload = {
     335             :         .callbacks = {
     336             :                 .gso_segment    =       tcp4_gso_segment,
     337             :                 .gro_receive    =       tcp4_gro_receive,
     338             :                 .gro_complete   =       tcp4_gro_complete,
     339             :         },
     340             : };
     341             : 
     342           1 : int __init tcpv4_offload_init(void)
     343             : {
     344           1 :         return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
     345             : }

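inet_add_offload() plugs the callbacks above into a per-protocol table (inet_offloads[] in net/ipv4/protocol.c) that the IPv4 GRO/GSO paths indirect through. A minimal single-threaded model of that registration, omitting the cmpxchg() the real function uses:

#include <stdio.h>

#define MAX_INET_PROTOS 256

struct net_offload_model {      /* stand-in for struct net_offload */
        const char *name;       /* the real one holds the callback struct */
};

static const struct net_offload_model *offloads[MAX_INET_PROTOS];

/* Claim the protocol's slot; fail if something is already registered
 * (the kernel uses the same 0 / -1 return convention). */
static int add_offload(const struct net_offload_model *o,
                       unsigned char proto)
{
        if (offloads[proto])
                return -1;
        offloads[proto] = o;
        return 0;
}

int main(void)
{
        static const struct net_offload_model tcp_offload = { "tcpv4" };

        if (add_offload(&tcp_offload, 6) == 0)   /* 6 == IPPROTO_TCP */
                printf("proto 6 -> %s\n", offloads[6]->name);
        return 0;
}
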
Generated by: LCOV version 1.14