LCOV - code coverage report
Current view: top level - include/net - udp.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            37       72      51.4 %
Functions:         5       10      50.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /*
       3             :  * INET         An implementation of the TCP/IP protocol suite for the LINUX
       4             :  *              operating system.  INET is implemented using the  BSD Socket
       5             :  *              interface as the means of communication with the user level.
       6             :  *
       7             :  *              Definitions for the UDP module.
       8             :  *
       9             :  * Version:     @(#)udp.h       1.0.2   05/07/93
      10             :  *
      11             :  * Authors:     Ross Biro
      12             :  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
      13             :  *
      14             :  * Fixes:
      15             :  *              Alan Cox        : Turned on udp checksums. I don't want to
      16             :  *                                chase 'memory corruption' bugs that aren't!
      17             :  */
      18             : #ifndef _UDP_H
      19             : #define _UDP_H
      20             : 
      21             : #include <linux/list.h>
      22             : #include <linux/bug.h>
      23             : #include <net/inet_sock.h>
      24             : #include <net/sock.h>
      25             : #include <net/snmp.h>
      26             : #include <net/ip.h>
      27             : #include <linux/ipv6.h>
      28             : #include <linux/seq_file.h>
      29             : #include <linux/poll.h>
      30             : #include <linux/indirect_call_wrapper.h>
      31             : 
      32             : /**
      33             :  *      struct udp_skb_cb  -  UDP(-Lite) private variables
      34             :  *
      35             :  *      @header:      private variables used by IPv4/IPv6
      36             :  *      @cscov:       checksum coverage length (UDP-Lite only)
       37             :  *      @partial_cov: if set, indicates partial csum coverage
      38             :  */
      39             : struct udp_skb_cb {
      40             :         union {
      41             :                 struct inet_skb_parm    h4;
      42             : #if IS_ENABLED(CONFIG_IPV6)
      43             :                 struct inet6_skb_parm   h6;
      44             : #endif
      45             :         } header;
      46             :         __u16           cscov;
      47             :         __u8            partial_cov;
      48             : };
      49             : #define UDP_SKB_CB(__skb)       ((struct udp_skb_cb *)((__skb)->cb))
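
As an aside on how this control block is used: the UDP-Lite receive path records the advertised checksum coverage here before the generic checksum helpers below consult it. The following is only a minimal sketch under that assumption; the helper name is illustrative and the real logic (including the minimum-coverage sanity checks) lives in net/udplite.h.

/* Illustrative sketch: stash UDP-Lite coverage info in the skb CB.
 * Real code also rejects cscov < 8 and other malformed values.
 */
static int example_store_udplite_coverage(struct sk_buff *skb,
					  const struct udphdr *uh)
{
	u16 cscov = ntohs(uh->len);	/* 0 means "cover the whole datagram" */

	UDP_SKB_CB(skb)->cscov = skb->len;
	UDP_SKB_CB(skb)->partial_cov = 0;

	if (cscov && cscov < skb->len) {
		UDP_SKB_CB(skb)->cscov = cscov;
		UDP_SKB_CB(skb)->partial_cov = 1;
	}
	return 0;
}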
      50             : 
      51             : /**
      52             :  *      struct udp_hslot - UDP hash slot
      53             :  *
      54             :  *      @head:  head of list of sockets
      55             :  *      @count: number of sockets in 'head' list
      56             :  *      @lock:  spinlock protecting changes to head/count
      57             :  */
      58             : struct udp_hslot {
      59             :         struct hlist_head       head;
      60             :         int                     count;
      61             :         spinlock_t              lock;
      62             : } __attribute__((aligned(2 * sizeof(long))));
      63             : 
      64             : /**
      65             :  *      struct udp_table - UDP table
      66             :  *
      67             :  *      @hash:  hash table, sockets are hashed on (local port)
      68             :  *      @hash2: hash table, sockets are hashed on (local port, local address)
      69             :  *      @mask:  number of slots in hash tables, minus 1
      70             :  *      @log:   log2(number of slots in hash table)
      71             :  */
      72             : struct udp_table {
      73             :         struct udp_hslot        *hash;
      74             :         struct udp_hslot        *hash2;
      75             :         unsigned int            mask;
      76             :         unsigned int            log;
      77             : };
      78             : extern struct udp_table udp_table;
      79             : void udp_table_init(struct udp_table *, const char *);
      80          82 : static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
      81             :                                              struct net *net, unsigned int num)
      82             : {
      83          82 :         return &table->hash[udp_hashfn(net, num, table->mask)];
      84             : }
      85             : /*
       86             :  * For the secondary hash, net_hash_mix() is applied before calling
       87             :  * udp_hashslot2(); this explains the difference from udp_hashslot().
      88             :  */
      89          80 : static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
      90             :                                               unsigned int hash)
      91             : {
      92          80 :         return &table->hash2[hash & table->mask];
      93             : }
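
To make the comment above concrete: callers derive the secondary hash with a helper that already folds in net_hash_mix(), e.g. ipv4_portaddr_hash() from net/ip.h, and then only need the masking done by udp_hashslot2(). A minimal sketch (the wrapper name is illustrative):

static struct udp_hslot *example_portaddr_hashslot(struct udp_table *table,
						   struct net *net,
						   __be32 saddr,
						   unsigned int port)
{
	/* ipv4_portaddr_hash() mixes in net_hash_mix(net) for us */
	u32 hash = ipv4_portaddr_hash(net, saddr, port);

	return udp_hashslot2(table, hash);
}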
      94             : 
      95             : extern struct proto udp_prot;
      96             : 
      97             : extern atomic_long_t udp_memory_allocated;
      98             : 
      99             : /* sysctl variables for udp */
     100             : extern long sysctl_udp_mem[3];
     101             : extern int sysctl_udp_rmem_min;
     102             : extern int sysctl_udp_wmem_min;
     103             : 
     104             : struct sk_buff;
     105             : 
     106             : /*
     107             :  *      Generic checksumming routines for UDP(-Lite) v4 and v6
     108             :  */
     109           0 : static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
     110             : {
     111           0 :         return (UDP_SKB_CB(skb)->cscov == skb->len ?
     112           0 :                 __skb_checksum_complete(skb) :
     113           0 :                 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
     114             : }
     115             : 
     116           2 : static inline int udp_lib_checksum_complete(struct sk_buff *skb)
     117             : {
     118           2 :         return !skb_csum_unnecessary(skb) &&
     119           0 :                 __udp_lib_checksum_complete(skb);
     120             : }
     121             : 
     122             : /**
     123             :  *      udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
     124             :  *      @sk:    socket we are writing to
     125             :  *      @skb:   sk_buff containing the filled-in UDP header
     126             :  *              (checksum field must be zeroed out)
     127             :  */
     128             : static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
     129             : {
     130             :         __wsum csum = csum_partial(skb_transport_header(skb),
     131             :                                    sizeof(struct udphdr), 0);
     132             :         skb_queue_walk(&sk->sk_write_queue, skb) {
     133             :                 csum = csum_add(csum, skb->csum);
     134             :         }
     135             :         return csum;
     136             : }
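
The sum returned above still lacks the pseudo-header; a transmit path would typically fold it in and apply the "all-zero means no checksum" rule before sending. A hedged sketch of that final step (the helper name is illustrative; csum_tcpudp_magic() and CSUM_MANGLED_0 are the usual kernel primitives):

static void example_finalize_udp4_csum(struct udphdr *uh, __be32 saddr,
				       __be32 daddr, int len, __wsum csum)
{
	uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 is reserved for "no checksum" */
}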
     137             : 
     138          14 : static inline __wsum udp_csum(struct sk_buff *skb)
     139             : {
     140          14 :         __wsum csum = csum_partial(skb_transport_header(skb),
     141             :                                    sizeof(struct udphdr), skb->csum);
     142             : 
     143          14 :         for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
     144           0 :                 csum = csum_add(csum, skb->csum);
     145             :         }
     146          14 :         return csum;
     147             : }
     148             : 
     149           0 : static inline __sum16 udp_v4_check(int len, __be32 saddr,
     150             :                                    __be32 daddr, __wsum base)
     151             : {
     152           0 :         return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
     153             : }
     154             : 
     155             : void udp_set_csum(bool nocheck, struct sk_buff *skb,
     156             :                   __be32 saddr, __be32 daddr, int len);
     157             : 
     158           2 : static inline void udp_csum_pull_header(struct sk_buff *skb)
     159             : {
     160           2 :         if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
     161           0 :                 skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
     162             :                                          skb->csum);
     163           2 :         skb_pull_rcsum(skb, sizeof(struct udphdr));
     164           2 :         UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
     165           2 : }
     166             : 
     167             : typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
     168             :                                      __be16 dport);
     169             : 
     170             : INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
     171             :                                                            struct sk_buff *));
     172             : INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
     173             : INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
     174             :                                                            struct sk_buff *));
     175             : INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
     176             : INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
     177             : INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
     178             : 
     179             : struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
     180             :                                 struct udphdr *uh, struct sock *sk);
     181             : int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
     182             : 
     183             : struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
     184             :                                   netdev_features_t features, bool is_ipv6);
     185             : 
     186           2 : static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
     187             : {
     188           2 :         struct udphdr *uh;
     189           2 :         unsigned int hlen, off;
     190             : 
     191           2 :         off  = skb_gro_offset(skb);
     192           2 :         hlen = off + sizeof(*uh);
     193           2 :         uh   = skb_gro_header_fast(skb, off);
     194           2 :         if (skb_gro_header_hard(skb, hlen))
     195           2 :                 uh = skb_gro_header_slow(skb, hlen, off);
     196             : 
     197           2 :         return uh;
     198             : }
     199             : 
     200             : /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
     201           0 : static inline int udp_lib_hash(struct sock *sk)
     202             : {
     203           0 :         BUG();
     204             :         return 0;
     205             : }
     206             : 
     207             : void udp_lib_unhash(struct sock *sk);
     208             : void udp_lib_rehash(struct sock *sk, u16 new_hash);
     209             : 
     210          39 : static inline void udp_lib_close(struct sock *sk, long timeout)
     211             : {
     212          39 :         sk_common_release(sk);
     213          39 : }
     214             : 
     215             : int udp_lib_get_port(struct sock *sk, unsigned short snum,
     216             :                      unsigned int hash2_nulladdr);
     217             : 
     218             : u32 udp_flow_hashrnd(void);
     219             : 
     220             : static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
     221             :                                        int min, int max, bool use_eth)
     222             : {
     223             :         u32 hash;
     224             : 
     225             :         if (min >= max) {
     226             :                 /* Use default range */
     227             :                 inet_get_local_port_range(net, &min, &max);
     228             :         }
     229             : 
     230             :         hash = skb_get_hash(skb);
     231             :         if (unlikely(!hash)) {
     232             :                 if (use_eth) {
      233             :                         /* Can't find a normal hash; the caller has indicated
      234             :                          * an Ethernet packet, so use that to compute a hash.
      235             :                          */
     236             :                         hash = jhash(skb->data, 2 * ETH_ALEN,
     237             :                                      (__force u32) skb->protocol);
     238             :                 } else {
      239             :                         /* Can't derive any sort of hash for the packet; set
      240             :                          * it to some consistent random value.
      241             :                          */
     242             :                         hash = udp_flow_hashrnd();
     243             :                 }
     244             :         }
     245             : 
      246             :         /* Since this is being sent on the wire, obfuscate the hash a bit
      247             :          * to minimize the possibility that any useful information is
      248             :          * leaked to an attacker. Only the upper 16 bits are relevant in
      249             :          * the computation for the 16-bit port value.
      250             :          */
     251             :         hash ^= hash << 16;
     252             : 
     253             :         return htons((((u64) hash * (max - min)) >> 32) + min);
     254             : }
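
Encapsulation drivers typically call this helper to spread tunnelled traffic across a source-port range, so ECMP routers and RSS hashing see per-flow entropy in the outer header. A minimal usage sketch (the wrapper is illustrative): passing min == max == 0 falls back to the netns local port range, and use_eth = true lets the helper hash the Ethernet header when the skb carries no flow hash.

static __be16 example_tunnel_src_port(struct net *net, struct sk_buff *skb)
{
	/* 0, 0: use the local port range sysctl; true: may hash the MAC header */
	return udp_flow_src_port(net, skb, 0, 0, true);
}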
     255             : 
     256           0 : static inline int udp_rqueue_get(struct sock *sk)
     257             : {
     258           0 :         return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
     259             : }
     260             : 
     261          16 : static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
     262             :                                        int dif, int sdif)
     263             : {
     264             : #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
     265             :         return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
     266             :                                  bound_dev_if, dif, sdif);
     267             : #else
     268          16 :         return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
     269             : #endif
     270             : }
     271             : 
     272             : /* net/ipv4/udp.c */
     273             : void udp_destruct_sock(struct sock *sk);
     274             : void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
     275             : int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
     276             : void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
     277             : struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
     278             :                                int noblock, int *off, int *err);
     279             : static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
     280             :                                            int noblock, int *err)
     281             : {
     282             :         int off = 0;
     283             : 
     284             :         return __skb_recv_udp(sk, flags, noblock, &off, err);
     285             : }
     286             : 
     287             : int udp_v4_early_demux(struct sk_buff *skb);
     288             : bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
     289             : int udp_get_port(struct sock *sk, unsigned short snum,
     290             :                  int (*saddr_cmp)(const struct sock *,
     291             :                                   const struct sock *));
     292             : int udp_err(struct sk_buff *, u32);
     293             : int udp_abort(struct sock *sk, int err);
     294             : int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
     295             : int udp_push_pending_frames(struct sock *sk);
     296             : void udp_flush_pending_frames(struct sock *sk);
     297             : int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
     298             : void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
     299             : int udp_rcv(struct sk_buff *skb);
     300             : int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
     301             : int udp_init_sock(struct sock *sk);
     302             : int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
     303             : int __udp_disconnect(struct sock *sk, int flags);
     304             : int udp_disconnect(struct sock *sk, int flags);
     305             : __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
     306             : struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
     307             :                                        netdev_features_t features,
     308             :                                        bool is_ipv6);
     309             : int udp_lib_getsockopt(struct sock *sk, int level, int optname,
     310             :                        char __user *optval, int __user *optlen);
     311             : int udp_lib_setsockopt(struct sock *sk, int level, int optname,
     312             :                        sockptr_t optval, unsigned int optlen,
     313             :                        int (*push_pending_frames)(struct sock *));
     314             : struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
     315             :                              __be32 daddr, __be16 dport, int dif);
     316             : struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
     317             :                                __be32 daddr, __be16 dport, int dif, int sdif,
     318             :                                struct udp_table *tbl, struct sk_buff *skb);
     319             : struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
     320             :                                  __be16 sport, __be16 dport);
     321             : struct sock *udp6_lib_lookup(struct net *net,
     322             :                              const struct in6_addr *saddr, __be16 sport,
     323             :                              const struct in6_addr *daddr, __be16 dport,
     324             :                              int dif);
     325             : struct sock *__udp6_lib_lookup(struct net *net,
     326             :                                const struct in6_addr *saddr, __be16 sport,
     327             :                                const struct in6_addr *daddr, __be16 dport,
     328             :                                int dif, int sdif, struct udp_table *tbl,
     329             :                                struct sk_buff *skb);
     330             : struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
     331             :                                  __be16 sport, __be16 dport);
     332             : 
      333             : /* UDP uses skb->dev_scratch to cache as much information as possible and
      334             :  * avoid what would otherwise be multiple cache misses on dequeue().
      335             :  */
     336             : struct udp_dev_scratch {
     337             :         /* skb->truesize and the stateless bit are embedded in a single field;
     338             :          * do not use a bitfield since the compiler emits better/smaller code
     339             :          * this way
     340             :          */
     341             :         u32 _tsize_state;
     342             : 
     343             : #if BITS_PER_LONG == 64
      344             :         /* len and the bit needed to compute skb_csum_unnecessary
      345             :          * will be on cold cache lines at recvmsg time.
      346             :          * skb->len can be stored in 16 bits since the UDP header has
      347             :          * already been validated and pulled.
      348             :          */
     349             :         u16 len;
     350             :         bool is_linear;
     351             :         bool csum_unnecessary;
     352             : #endif
     353             : };
     354             : 
     355          14 : static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
     356             : {
     357           6 :         return (struct udp_dev_scratch *)&skb->dev_scratch;
     358             : }
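
The scratch area is written once when a packet is queued, so later recvmsg() calls can read these hot fields without touching cold skb cache lines. Roughly how an enqueue path might populate it, as a simplified sketch (the real code in net/ipv4/udp.c additionally encodes a "stateless" flag into _tsize_state):

static void example_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
}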
     359             : 
     360             : #if BITS_PER_LONG == 64
     361           2 : static inline unsigned int udp_skb_len(struct sk_buff *skb)
     362             : {
     363           2 :         return udp_skb_scratch(skb)->len;
     364             : }
     365             : 
     366           2 : static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
     367             : {
     368           2 :         return udp_skb_scratch(skb)->csum_unnecessary;
     369             : }
     370             : 
     371           2 : static inline bool udp_skb_is_linear(struct sk_buff *skb)
     372             : {
     373           2 :         return udp_skb_scratch(skb)->is_linear;
     374             : }
     375             : 
     376             : #else
     377             : static inline unsigned int udp_skb_len(struct sk_buff *skb)
     378             : {
     379             :         return skb->len;
     380             : }
     381             : 
     382             : static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
     383             : {
     384             :         return skb_csum_unnecessary(skb);
     385             : }
     386             : 
     387             : static inline bool udp_skb_is_linear(struct sk_buff *skb)
     388             : {
     389             :         return !skb_is_nonlinear(skb);
     390             : }
     391             : #endif
     392             : 
     393           0 : static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
     394             :                                   struct iov_iter *to)
     395             : {
     396           0 :         int n;
     397             : 
     398           0 :         n = copy_to_iter(skb->data + off, len, to);
     399           0 :         if (n == len)
     400             :                 return 0;
     401             : 
     402           0 :         iov_iter_revert(to, n);
     403           0 :         return -EFAULT;
     404             : }
     405             : 
     406             : /*
     407             :  *      SNMP statistics for UDP and UDP-Lite
     408             :  */
     409             : #define UDP_INC_STATS(net, field, is_udplite)                 do { \
     410             :         if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
     411             :         else            SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
     412             : #define __UDP_INC_STATS(net, field, is_udplite)               do { \
     413             :         if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
     414             :         else            __SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)
     415             : 
     416             : #define __UDP6_INC_STATS(net, field, is_udplite)            do { \
     417             :         if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
     418             :         else            __SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
     419             : } while(0)
     420             : #define UDP6_INC_STATS(net, field, __lite)                  do { \
     421             :         if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
     422             :         else        SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
     423             : } while(0)
     424             : 
     425             : #if IS_ENABLED(CONFIG_IPV6)
     426             : #define __UDPX_MIB(sk, ipv4)                                            \
     427             : ({                                                                      \
     428             :         ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :      \
     429             :                                  sock_net(sk)->mib.udp_statistics) : \
     430             :                 (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :      \
     431             :                                  sock_net(sk)->mib.udp_stats_in6);   \
     432             : })
     433             : #else
     434             : #define __UDPX_MIB(sk, ipv4)                                            \
     435             : ({                                                                      \
     436             :         IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :              \
     437             :                          sock_net(sk)->mib.udp_statistics;           \
     438             : })
     439             : #endif
     440             : 
     441             : #define __UDPX_INC_STATS(sk, field) \
     442             :         __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
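
Call sites pick the macro matching the information they have at hand: the UDP/UDP6 variants take an explicit netns and UDP-Lite flag, while __UDPX_INC_STATS derives both family and protocol from the socket. A short usage sketch (the wrapper function is illustrative; the MIB fields are the standard UDP counters from linux/snmp.h):

static void example_count_rx(struct sock *sk, bool is_udplite)
{
	/* family- and protocol-aware, keyed off the socket */
	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);

	/* explicit IPv4 variant, keyed off the netns and the UDP-Lite flag */
	UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}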
     443             : 
     444             : #ifdef CONFIG_PROC_FS
     445             : struct udp_seq_afinfo {
     446             :         sa_family_t                     family;
     447             :         struct udp_table                *udp_table;
     448             : };
     449             : 
     450             : struct udp_iter_state {
     451             :         struct seq_net_private  p;
     452             :         int                     bucket;
     453             :         struct udp_seq_afinfo   *bpf_seq_afinfo;
     454             : };
     455             : 
     456             : void *udp_seq_start(struct seq_file *seq, loff_t *pos);
     457             : void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
     458             : void udp_seq_stop(struct seq_file *seq, void *v);
     459             : 
     460             : extern const struct seq_operations udp_seq_ops;
     461             : extern const struct seq_operations udp6_seq_ops;
     462             : 
     463             : int udp4_proc_init(void);
     464             : void udp4_proc_exit(void);
     465             : #endif /* CONFIG_PROC_FS */
     466             : 
     467             : int udpv4_offload_init(void);
     468             : 
     469             : void udp_init(void);
     470             : 
     471             : DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
     472             : void udp_encap_enable(void);
     473             : void udp_encap_disable(void);
     474             : #if IS_ENABLED(CONFIG_IPV6)
     475             : DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
     476             : void udpv6_encap_enable(void);
     477             : #endif
     478             : 
     479           0 : static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
     480             :                                               struct sk_buff *skb, bool ipv4)
     481             : {
     482           0 :         netdev_features_t features = NETIF_F_SG;
     483           0 :         struct sk_buff *segs;
     484             : 
     485             :         /* Avoid csum recalculation by skb_segment unless userspace explicitly
     486             :          * asks for the final checksum values
     487             :          */
     488           0 :         if (!inet_get_convert_csum(sk))
     489           0 :                 features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
     490             : 
      491             :         /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
      492             :          * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
      493             :          * packets in udp_gro_complete_segment, as does UDP GSO (verified by
      494             :          * udp_send_skb). But when those packets are looped in dev_loopback_xmit
      495             :          * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset it in this
      496             :          * specific case, where PARTIAL is both correct and required.
      497             :          */
     498           0 :         if (skb->pkt_type == PACKET_LOOPBACK)
     499           0 :                 skb->ip_summed = CHECKSUM_PARTIAL;
     500             : 
      501             :         /* the GSO CB lies after the UDP one, so there is no need to save and
      502             :          * restore any CB fragment
      503             :          */
     504           0 :         segs = __skb_gso_segment(skb, features, false);
     505           0 :         if (IS_ERR_OR_NULL(segs)) {
     506           0 :                 int segs_nr = skb_shinfo(skb)->gso_segs;
     507             : 
     508           0 :                 atomic_add(segs_nr, &sk->sk_drops);
     509           0 :                 SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
     510           0 :                 kfree_skb(skb);
     511           0 :                 return NULL;
     512             :         }
     513             : 
     514           0 :         consume_skb(skb);
     515           0 :         return segs;
     516             : }
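
A caller that finds an unexpected GSO packet on a socket would segment it with the helper above and then deliver each resulting skb on its own, roughly following the pattern in net/ipv4/udp.c. In this sketch the per-segment handler is a hypothetical parameter:

static int example_rcv_gso(struct sock *sk, struct sk_buff *skb,
			   int (*deliver_one)(struct sock *, struct sk_buff *))
{
	struct sk_buff *next, *segs;
	int ret = 0;

	segs = udp_rcv_segment(sk, skb, true);	/* frees skb and counts drops on error */
	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);	/* detach before individual delivery */
		ret = deliver_one(sk, skb);
	}
	return ret;
}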
     517             : 
     518             : #ifdef CONFIG_BPF_STREAM_PARSER
     519             : struct sk_psock;
     520             : struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
      521             : #endif /* CONFIG_BPF_STREAM_PARSER */
     522             : 
     523             : #endif  /* _UDP_H */

Generated by: LCOV version 1.14