LCOV - code coverage report
Current view: top level - include/linux - netdevice.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
              Hit    Total    Coverage
Lines:        214      497      43.1 %
Functions:     18       40      45.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /*
       3             :  * INET         An implementation of the TCP/IP protocol suite for the LINUX
       4             :  *              operating system.  INET is implemented using the  BSD Socket
       5             :  *              interface as the means of communication with the user level.
       6             :  *
       7             :  *              Definitions for the Interfaces handler.
       8             :  *
       9             :  * Version:     @(#)dev.h       1.0.10  08/12/93
      10             :  *
      11             :  * Authors:     Ross Biro
      12             :  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
      13             :  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
      14             :  *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
      15             :  *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
      16             :  *              Bjorn Ekwall. <bj0rn@blox.se>
      17             :  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
      18             :  *
      19             :  *              Moved to /usr/include/linux for NET3
      20             :  */
      21             : #ifndef _LINUX_NETDEVICE_H
      22             : #define _LINUX_NETDEVICE_H
      23             : 
      24             : #include <linux/timer.h>
      25             : #include <linux/bug.h>
      26             : #include <linux/delay.h>
      27             : #include <linux/atomic.h>
      28             : #include <linux/prefetch.h>
      29             : #include <asm/cache.h>
      30             : #include <asm/byteorder.h>
      31             : 
      32             : #include <linux/percpu.h>
      33             : #include <linux/rculist.h>
      34             : #include <linux/workqueue.h>
      35             : #include <linux/dynamic_queue_limits.h>
      36             : 
      37             : #include <net/net_namespace.h>
      38             : #ifdef CONFIG_DCB
      39             : #include <net/dcbnl.h>
      40             : #endif
      41             : #include <net/netprio_cgroup.h>
      42             : #include <net/xdp.h>
      43             : 
      44             : #include <linux/netdev_features.h>
      45             : #include <linux/neighbour.h>
      46             : #include <uapi/linux/netdevice.h>
      47             : #include <uapi/linux/if_bonding.h>
      48             : #include <uapi/linux/pkt_cls.h>
      49             : #include <linux/hashtable.h>
      50             : 
      51             : struct netpoll_info;
      52             : struct device;
      53             : struct ethtool_ops;
      54             : struct phy_device;
      55             : struct dsa_port;
      56             : struct ip_tunnel_parm;
      57             : struct macsec_context;
      58             : struct macsec_ops;
      59             : 
      60             : struct sfp_bus;
      61             : /* 802.11 specific */
      62             : struct wireless_dev;
      63             : /* 802.15.4 specific */
      64             : struct wpan_dev;
      65             : struct mpls_dev;
      66             : /* UDP Tunnel offloads */
      67             : struct udp_tunnel_info;
      68             : struct udp_tunnel_nic_info;
      69             : struct udp_tunnel_nic;
      70             : struct bpf_prog;
      71             : struct xdp_buff;
      72             : 
      73             : void synchronize_net(void);
      74             : void netdev_set_default_ethtool_ops(struct net_device *dev,
      75             :                                     const struct ethtool_ops *ops);
      76             : 
      77             : /* Backlog congestion levels */
      78             : #define NET_RX_SUCCESS          0       /* keep 'em coming, baby */
      79             : #define NET_RX_DROP             1       /* packet dropped */
      80             : 
      81             : #define MAX_NEST_DEV 8
      82             : 
      83             : /*
      84             :  * Transmit return codes: transmit return codes originate from three different
      85             :  * namespaces:
      86             :  *
      87             :  * - qdisc return codes
      88             :  * - driver transmit return codes
      89             :  * - errno values
      90             :  *
      91             :  * Drivers are allowed to return any one of those in their hard_start_xmit()
      92             :  * function. Real network devices commonly used with qdiscs should only return
      93             :  * the driver transmit return codes though - when qdiscs are used, the actual
      94             :  * transmission happens asynchronously, so the value is not propagated to
      95             :  * higher layers. Virtual network devices transmit synchronously; in this case
      96             :  * the driver transmit return codes are consumed by dev_queue_xmit(), and all
      97             :  * others are propagated to higher layers.
      98             :  */
      99             : 
     100             : /* qdisc ->enqueue() return codes. */
     101             : #define NET_XMIT_SUCCESS        0x00
     102             : #define NET_XMIT_DROP           0x01    /* skb dropped                  */
     103             : #define NET_XMIT_CN             0x02    /* congestion notification      */
     104             : #define NET_XMIT_MASK           0x0f    /* qdisc flags in net/sch_generic.h */
     105             : 
     106             : /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
      107             :  * indicates that the device will soon be dropping packets, or is already
      108             :  * dropping packets of the same priority, so we should send less aggressively. */
     109             : #define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
     110             : #define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
     111             : 
     112             : /* Driver transmit return codes */
     113             : #define NETDEV_TX_MASK          0xf0
     114             : 
     115             : enum netdev_tx {
     116             :         __NETDEV_TX_MIN  = INT_MIN,     /* make sure enum is signed */
     117             :         NETDEV_TX_OK     = 0x00,        /* driver took care of packet */
      118             :         NETDEV_TX_BUSY   = 0x10,        /* driver tx path was busy */
     119             : };
     120             : typedef enum netdev_tx netdev_tx_t;
     121             : 
     122             : /*
     123             :  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
     124             :  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
     125             :  */
     126         896 : static inline bool dev_xmit_complete(int rc)
     127             : {
     128             :         /*
     129             :          * Positive cases with an skb consumed by a driver:
     130             :          * - successful transmission (rc == NETDEV_TX_OK)
     131             :          * - error while transmitting (rc < 0)
     132             :          * - error while queueing to a different device (rc & NET_XMIT_MASK)
     133             :          */
     134         896 :         if (likely(rc < NET_XMIT_MASK))
     135         448 :                 return true;
     136             : 
     137             :         return false;
     138             : }
     139             : 
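                      : /* Editor's sketch (not part of netdevice.h): how callers typically
                      :  * consume these codes. dev_queue_xmit(), declared later in this header,
                      :  * always takes ownership of the skb; net_xmit_eval() folds the
                      :  * NET_XMIT_CN congestion hint into success. my_send_skb() is a
                      :  * hypothetical wrapper.
                      :  */
                      : static int my_send_skb(struct sk_buff *skb)
                      : {
                      :         int err = dev_queue_xmit(skb);  /* skb consumed, whatever err is */
                      : 
                      :         return net_xmit_eval(err);      /* 0 on success or CN, else error */
                      : }
                      : 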
     140             : /*
     141             :  *      Compute the worst-case header length according to the protocols
     142             :  *      used.
     143             :  */
     144             : 
     145             : #if defined(CONFIG_HYPERV_NET)
     146             : # define LL_MAX_HEADER 128
     147             : #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
     148             : # if defined(CONFIG_MAC80211_MESH)
     149             : #  define LL_MAX_HEADER 128
     150             : # else
     151             : #  define LL_MAX_HEADER 96
     152             : # endif
     153             : #else
     154             : # define LL_MAX_HEADER 32
     155             : #endif
     156             : 
     157             : #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
     158             :     !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
     159             : #define MAX_HEADER LL_MAX_HEADER
     160             : #else
     161             : #define MAX_HEADER (LL_MAX_HEADER + 48)
     162             : #endif
     163             : 
     164             : /*
     165             :  *      Old network device statistics. Fields are native words
     166             :  *      (unsigned long) so they can be read and written atomically.
     167             :  */
     168             : 
     169             : struct net_device_stats {
     170             :         unsigned long   rx_packets;
     171             :         unsigned long   tx_packets;
     172             :         unsigned long   rx_bytes;
     173             :         unsigned long   tx_bytes;
     174             :         unsigned long   rx_errors;
     175             :         unsigned long   tx_errors;
     176             :         unsigned long   rx_dropped;
     177             :         unsigned long   tx_dropped;
     178             :         unsigned long   multicast;
     179             :         unsigned long   collisions;
     180             :         unsigned long   rx_length_errors;
     181             :         unsigned long   rx_over_errors;
     182             :         unsigned long   rx_crc_errors;
     183             :         unsigned long   rx_frame_errors;
     184             :         unsigned long   rx_fifo_errors;
     185             :         unsigned long   rx_missed_errors;
     186             :         unsigned long   tx_aborted_errors;
     187             :         unsigned long   tx_carrier_errors;
     188             :         unsigned long   tx_fifo_errors;
     189             :         unsigned long   tx_heartbeat_errors;
     190             :         unsigned long   tx_window_errors;
     191             :         unsigned long   rx_compressed;
     192             :         unsigned long   tx_compressed;
     193             : };
     194             : 
     195             : 
     196             : #include <linux/cache.h>
     197             : #include <linux/skbuff.h>
     198             : 
     199             : #ifdef CONFIG_RPS
     200             : #include <linux/static_key.h>
     201             : extern struct static_key_false rps_needed;
     202             : extern struct static_key_false rfs_needed;
     203             : #endif
     204             : 
     205             : struct neighbour;
     206             : struct neigh_parms;
     207             : struct sk_buff;
     208             : 
     209             : struct netdev_hw_addr {
     210             :         struct list_head        list;
     211             :         unsigned char           addr[MAX_ADDR_LEN];
     212             :         unsigned char           type;
     213             : #define NETDEV_HW_ADDR_T_LAN            1
     214             : #define NETDEV_HW_ADDR_T_SAN            2
     215             : #define NETDEV_HW_ADDR_T_UNICAST        3
     216             : #define NETDEV_HW_ADDR_T_MULTICAST      4
     217             :         bool                    global_use;
     218             :         int                     sync_cnt;
     219             :         int                     refcount;
     220             :         int                     synced;
     221             :         struct rcu_head         rcu_head;
     222             : };
     223             : 
     224             : struct netdev_hw_addr_list {
     225             :         struct list_head        list;
     226             :         int                     count;
     227             : };
     228             : 
     229             : #define netdev_hw_addr_list_count(l) ((l)->count)
     230             : #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
     231             : #define netdev_hw_addr_list_for_each(ha, l) \
     232             :         list_for_each_entry(ha, &(l)->list, list)
     233             : 
     234             : #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
     235             : #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
     236             : #define netdev_for_each_uc_addr(ha, dev) \
     237             :         netdev_hw_addr_list_for_each(ha, &(dev)->uc)
     238             : 
     239             : #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
     240             : #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
     241             : #define netdev_for_each_mc_addr(ha, dev) \
     242             :         netdev_hw_addr_list_for_each(ha, &(dev)->mc)
     243             : 
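                      : /* Editor's sketch (not part of netdevice.h): the usual way a driver
                      :  * walks its multicast list from its ndo_set_rx_mode() callback;
                      :  * my_write_filter() stands in for hypothetical hardware-specific code.
                      :  */
                      : static void my_write_filter(struct net_device *dev, const unsigned char *addr);
                      : 
                      : static void my_set_rx_mode(struct net_device *dev)
                      : {
                      :         struct netdev_hw_addr *ha;
                      : 
                      :         netdev_for_each_mc_addr(ha, dev)        /* walks dev->mc.list */
                      :                 my_write_filter(dev, ha->addr);
                      : }
                      : 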
     244             : struct hh_cache {
     245             :         unsigned int    hh_len;
     246             :         seqlock_t       hh_lock;
     247             : 
     248             :         /* cached hardware header; allow for machine alignment needs.        */
     249             : #define HH_DATA_MOD     16
     250             : #define HH_DATA_OFF(__len) \
     251             :         (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
     252             : #define HH_DATA_ALIGN(__len) \
     253             :         (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
     254             :         unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
     255             : };
     256             : 
     257             : /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
     258             :  * Alternative is:
     259             :  *   dev->hard_header_len ? (dev->hard_header_len +
     260             :  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
     261             :  *
     262             :  * We could use other alignment values, but we must maintain the
     263             :  * relationship HH alignment <= LL alignment.
     264             :  */
     265             : #define LL_RESERVED_SPACE(dev) \
     266             :         ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
     267             : #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
     268             :         ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
     269             : 
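                      : /* Editor's sketch (not part of netdevice.h): the allocation idiom these
                      :  * macros exist for. For Ethernet (hard_header_len = 14, needed_headroom
                      :  * = 0), LL_RESERVED_SPACE() evaluates to (14 & ~15) + 16 = 16 bytes.
                      :  */
                      : static struct sk_buff *my_alloc_packet(struct net_device *dev, int payload)
                      : {
                      :         struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload,
                      :                                         GFP_ATOMIC);
                      : 
                      :         if (skb)
                      :                 skb_reserve(skb, LL_RESERVED_SPACE(dev)); /* link-layer headroom */
                      :         return skb;
                      : }
                      : 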
     270             : struct header_ops {
     271             :         int     (*create) (struct sk_buff *skb, struct net_device *dev,
     272             :                            unsigned short type, const void *daddr,
     273             :                            const void *saddr, unsigned int len);
     274             :         int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
     275             :         int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
     276             :         void    (*cache_update)(struct hh_cache *hh,
     277             :                                 const struct net_device *dev,
     278             :                                 const unsigned char *haddr);
     279             :         bool    (*validate)(const char *ll_header, unsigned int len);
     280             :         __be16  (*parse_protocol)(const struct sk_buff *skb);
     281             : };
     282             : 
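                      : /* Editor's sketch (not part of netdevice.h): Ethernet wires these hooks
                      :  * up roughly as below (see net/ethernet/eth.c and <linux/etherdevice.h>);
                      :  * a custom link layer would supply its own implementations.
                      :  */
                      : static const struct header_ops my_header_ops = {
                      :         .create         = eth_header,           /* build the 14-byte header */
                      :         .parse          = eth_header_parse,     /* extract the source address */
                      :         .cache          = eth_header_cache,     /* fill a struct hh_cache */
                      :         .cache_update   = eth_header_cache_update,
                      :         .parse_protocol = eth_header_parse_protocol,
                      : };
                      : 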
     283             : /* These flag bits are private to the generic network queueing
     284             :  * layer; they may not be explicitly referenced by any other
     285             :  * code.
     286             :  */
     287             : 
     288             : enum netdev_state_t {
     289             :         __LINK_STATE_START,
     290             :         __LINK_STATE_PRESENT,
     291             :         __LINK_STATE_NOCARRIER,
     292             :         __LINK_STATE_LINKWATCH_PENDING,
     293             :         __LINK_STATE_DORMANT,
     294             :         __LINK_STATE_TESTING,
     295             : };
     296             : 
     297             : 
     298             : /*
     299             :  * This structure holds boot-time configured netdevice settings. They
     300             :  * are then used in the device probing.
     301             :  */
     302             : struct netdev_boot_setup {
     303             :         char name[IFNAMSIZ];
     304             :         struct ifmap map;
     305             : };
     306             : #define NETDEV_BOOT_SETUP_MAX 8
     307             : 
     308             : int __init netdev_boot_setup(char *str);
     309             : 
     310             : struct gro_list {
     311             :         struct list_head        list;
     312             :         int                     count;
     313             : };
     314             : 
     315             : /*
      316             :  * Size of the GRO hash buckets: must be less than the number of bits
      317             :  * in napi_struct::gro_bitmask.
     318             :  */
     319             : #define GRO_HASH_BUCKETS        8
     320             : 
     321             : /*
     322             :  * Structure for NAPI scheduling similar to tasklet but with weighting
     323             :  */
     324             : struct napi_struct {
     325             :         /* The poll_list must only be managed by the entity which
     326             :          * changes the state of the NAPI_STATE_SCHED bit.  This means
     327             :          * whoever atomically sets that bit can add this napi_struct
     328             :          * to the per-CPU poll_list, and whoever clears that bit
     329             :          * can remove from the list right before clearing the bit.
     330             :          */
     331             :         struct list_head        poll_list;
     332             : 
     333             :         unsigned long           state;
     334             :         int                     weight;
     335             :         int                     defer_hard_irqs_count;
     336             :         unsigned long           gro_bitmask;
     337             :         int                     (*poll)(struct napi_struct *, int);
     338             : #ifdef CONFIG_NETPOLL
     339             :         int                     poll_owner;
     340             : #endif
     341             :         struct net_device       *dev;
     342             :         struct gro_list         gro_hash[GRO_HASH_BUCKETS];
     343             :         struct sk_buff          *skb;
     344             :         struct list_head        rx_list; /* Pending GRO_NORMAL skbs */
     345             :         int                     rx_count; /* length of rx_list */
     346             :         struct hrtimer          timer;
     347             :         struct list_head        dev_list;
     348             :         struct hlist_node       napi_hash_node;
     349             :         unsigned int            napi_id;
     350             :         struct task_struct      *thread;
     351             : };
     352             : 
     353             : enum {
     354             :         NAPI_STATE_SCHED,               /* Poll is scheduled */
     355             :         NAPI_STATE_MISSED,              /* reschedule a napi */
     356             :         NAPI_STATE_DISABLE,             /* Disable pending */
     357             :         NAPI_STATE_NPSVC,               /* Netpoll - don't dequeue from poll_list */
     358             :         NAPI_STATE_LISTED,              /* NAPI added to system lists */
     359             :         NAPI_STATE_NO_BUSY_POLL,        /* Do not add in napi_hash, no busy polling */
     360             :         NAPI_STATE_IN_BUSY_POLL,        /* sk_busy_loop() owns this NAPI */
      361             :         NAPI_STATE_PREFER_BUSY_POLL,    /* prefer busy-polling over softirq processing */
      362             :         NAPI_STATE_THREADED,            /* The poll is performed inside its own thread */
     363             : };
     364             : 
     365             : enum {
     366             :         NAPIF_STATE_SCHED               = BIT(NAPI_STATE_SCHED),
     367             :         NAPIF_STATE_MISSED              = BIT(NAPI_STATE_MISSED),
     368             :         NAPIF_STATE_DISABLE             = BIT(NAPI_STATE_DISABLE),
     369             :         NAPIF_STATE_NPSVC               = BIT(NAPI_STATE_NPSVC),
     370             :         NAPIF_STATE_LISTED              = BIT(NAPI_STATE_LISTED),
     371             :         NAPIF_STATE_NO_BUSY_POLL        = BIT(NAPI_STATE_NO_BUSY_POLL),
     372             :         NAPIF_STATE_IN_BUSY_POLL        = BIT(NAPI_STATE_IN_BUSY_POLL),
     373             :         NAPIF_STATE_PREFER_BUSY_POLL    = BIT(NAPI_STATE_PREFER_BUSY_POLL),
     374             :         NAPIF_STATE_THREADED            = BIT(NAPI_STATE_THREADED),
     375             : };
     376             : 
     377             : enum gro_result {
     378             :         GRO_MERGED,
     379             :         GRO_MERGED_FREE,
     380             :         GRO_HELD,
     381             :         GRO_NORMAL,
     382             :         GRO_CONSUMED,
     383             : };
     384             : typedef enum gro_result gro_result_t;
     385             : 
     386             : /*
     387             :  * enum rx_handler_result - Possible return values for rx_handlers.
     388             :  * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
     389             :  * further.
     390             :  * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
     391             :  * case skb->dev was changed by rx_handler.
     392             :  * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
     393             :  * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
     394             :  *
     395             :  * rx_handlers are functions called from inside __netif_receive_skb(), to do
     396             :  * special processing of the skb, prior to delivery to protocol handlers.
     397             :  *
     398             :  * Currently, a net_device can only have a single rx_handler registered. Trying
     399             :  * to register a second rx_handler will return -EBUSY.
     400             :  *
      401             :  * To register an rx_handler on a net_device, use netdev_rx_handler_register().
      402             :  * To unregister an rx_handler on a net_device, use
     403             :  * netdev_rx_handler_unregister().
     404             :  *
     405             :  * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
     406             :  * do with the skb.
     407             :  *
     408             :  * If the rx_handler consumed the skb in some way, it should return
     409             :  * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
     410             :  * the skb to be delivered in some other way.
     411             :  *
     412             :  * If the rx_handler changed skb->dev, to divert the skb to another
     413             :  * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
     414             :  * new device will be called if it exists.
     415             :  *
     416             :  * If the rx_handler decides the skb should be ignored, it should return
     417             :  * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
      418             :  * are registered on the exact device (ptype->dev == skb->dev).
     419             :  *
     420             :  * If the rx_handler didn't change skb->dev, but wants the skb to be normally
     421             :  * delivered, it should return RX_HANDLER_PASS.
     422             :  *
     423             :  * A device without a registered rx_handler will behave as if rx_handler
     424             :  * returned RX_HANDLER_PASS.
     425             :  */
     426             : 
     427             : enum rx_handler_result {
     428             :         RX_HANDLER_CONSUMED,
     429             :         RX_HANDLER_ANOTHER,
     430             :         RX_HANDLER_EXACT,
     431             :         RX_HANDLER_PASS,
     432             : };
     433             : typedef enum rx_handler_result rx_handler_result_t;
     434             : typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
     435             : 
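                      : /* Editor's sketch (not part of netdevice.h): the shape of an rx_handler,
                      :  * as registered by bridge/team/macvlan via netdev_rx_handler_register();
                      :  * the my_*() helpers are hypothetical.
                      :  */
                      : static bool my_wants_packet(const struct sk_buff *skb);       /* hypothetical */
                      : static struct net_device *my_upper_dev(const struct sk_buff *skb);
                      : 
                      : static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
                      : {
                      :         struct sk_buff *skb = *pskb;
                      : 
                      :         if (my_wants_packet(skb)) {
                      :                 skb->dev = my_upper_dev(skb);   /* divert to another device */
                      :                 return RX_HANDLER_ANOTHER;      /* receive path runs again */
                      :         }
                      :         return RX_HANDLER_PASS;                 /* normal delivery */
                      : }
                      : 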
     436             : void __napi_schedule(struct napi_struct *n);
     437             : void __napi_schedule_irqoff(struct napi_struct *n);
     438             : 
     439           0 : static inline bool napi_disable_pending(struct napi_struct *n)
     440             : {
     441           0 :         return test_bit(NAPI_STATE_DISABLE, &n->state);
     442             : }
     443             : 
     444           0 : static inline bool napi_prefer_busy_poll(struct napi_struct *n)
     445             : {
     446           0 :         return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
     447             : }
     448             : 
     449             : bool napi_schedule_prep(struct napi_struct *n);
     450             : 
     451             : /**
     452             :  *      napi_schedule - schedule NAPI poll
     453             :  *      @n: NAPI context
     454             :  *
     455             :  * Schedule NAPI poll routine to be called if it is not already
     456             :  * running.
     457             :  */
     458           0 : static inline void napi_schedule(struct napi_struct *n)
     459             : {
     460           0 :         if (napi_schedule_prep(n))
     461           0 :                 __napi_schedule(n);
     462           0 : }
     463             : 
     464             : /**
     465             :  *      napi_schedule_irqoff - schedule NAPI poll
     466             :  *      @n: NAPI context
     467             :  *
     468             :  * Variant of napi_schedule(), assuming hard irqs are masked.
     469             :  */
     470             : static inline void napi_schedule_irqoff(struct napi_struct *n)
     471             : {
     472             :         if (napi_schedule_prep(n))
     473             :                 __napi_schedule_irqoff(n);
     474             : }
     475             : 
     476             : /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
     477             : static inline bool napi_reschedule(struct napi_struct *napi)
     478             : {
     479             :         if (napi_schedule_prep(napi)) {
     480             :                 __napi_schedule(napi);
     481             :                 return true;
     482             :         }
     483             :         return false;
     484             : }
     485             : 
     486             : bool napi_complete_done(struct napi_struct *n, int work_done);
     487             : /**
     488             :  *      napi_complete - NAPI processing complete
     489             :  *      @n: NAPI context
     490             :  *
     491             :  * Mark NAPI processing as complete.
     492             :  * Consider using napi_complete_done() instead.
     493             :  * Return false if device should avoid rearming interrupts.
     494             :  */
     495           0 : static inline bool napi_complete(struct napi_struct *n)
     496             : {
     497           0 :         return napi_complete_done(n, 0);
     498             : }
     499             : 
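                      : /* Editor's sketch (not part of netdevice.h): the canonical NAPI pattern
                      :  * these helpers support. struct my_adapter and the my_*() helpers are
                      :  * hypothetical; irqreturn_t/IRQ_HANDLED come from <linux/interrupt.h>.
                      :  */
                      : struct my_adapter {
                      :         struct napi_struct napi;
                      : };
                      : 
                      : static void my_mask_rx_irq(struct my_adapter *adap);    /* hypothetical */
                      : static void my_unmask_rx_irq(struct my_adapter *adap);  /* hypothetical */
                      : static int my_clean_rx_ring(struct my_adapter *adap, int budget);
                      : 
                      : static irqreturn_t my_interrupt(int irq, void *data)
                      : {
                      :         struct my_adapter *adap = data;
                      : 
                      :         my_mask_rx_irq(adap);          /* quiesce the interrupt source */
                      :         napi_schedule(&adap->napi);    /* poll will run in softirq/thread */
                      :         return IRQ_HANDLED;
                      : }
                      : 
                      : static int my_poll(struct napi_struct *napi, int budget)
                      : {
                      :         struct my_adapter *adap = container_of(napi, struct my_adapter, napi);
                      :         int done = my_clean_rx_ring(adap, budget);   /* processes <= budget */
                      : 
                      :         if (done < budget && napi_complete_done(napi, done))
                      :                 my_unmask_rx_irq(adap);  /* re-arm only when truly done */
                      :         return done;
                      : }
                      : 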
     500             : int dev_set_threaded(struct net_device *dev, bool threaded);
     501             : 
     502             : /**
     503             :  *      napi_disable - prevent NAPI from scheduling
     504             :  *      @n: NAPI context
     505             :  *
     506             :  * Stop NAPI from being scheduled on this context.
     507             :  * Waits till any outstanding processing completes.
     508             :  */
     509             : void napi_disable(struct napi_struct *n);
     510             : 
     511             : void napi_enable(struct napi_struct *n);
     512             : 
     513             : /**
     514             :  *      napi_synchronize - wait until NAPI is not running
     515             :  *      @n: NAPI context
     516             :  *
     517             :  * Wait until NAPI is done being scheduled on this context.
     518             :  * Waits till any outstanding processing completes but
     519             :  * does not disable future activations.
     520             :  */
     521             : static inline void napi_synchronize(const struct napi_struct *n)
     522             : {
     523             :         if (IS_ENABLED(CONFIG_SMP))
     524             :                 while (test_bit(NAPI_STATE_SCHED, &n->state))
     525             :                         msleep(1);
     526             :         else
     527             :                 barrier();
     528             : }
     529             : 
     530             : /**
     531             :  *      napi_if_scheduled_mark_missed - if napi is running, set the
     532             :  *      NAPIF_STATE_MISSED
     533             :  *      @n: NAPI context
     534             :  *
      535             :  * If napi is running, set the NAPIF_STATE_MISSED bit, and return true if
     536             :  * NAPI is scheduled.
     537             :  **/
     538             : static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
     539             : {
     540             :         unsigned long val, new;
     541             : 
     542             :         do {
     543             :                 val = READ_ONCE(n->state);
     544             :                 if (val & NAPIF_STATE_DISABLE)
     545             :                         return true;
     546             : 
     547             :                 if (!(val & NAPIF_STATE_SCHED))
     548             :                         return false;
     549             : 
     550             :                 new = val | NAPIF_STATE_MISSED;
     551             :         } while (cmpxchg(&n->state, val, new) != val);
     552             : 
     553             :         return true;
     554             : }
     555             : 
     556             : enum netdev_queue_state_t {
     557             :         __QUEUE_STATE_DRV_XOFF,
     558             :         __QUEUE_STATE_STACK_XOFF,
     559             :         __QUEUE_STATE_FROZEN,
     560             : };
     561             : 
     562             : #define QUEUE_STATE_DRV_XOFF    (1 << __QUEUE_STATE_DRV_XOFF)
     563             : #define QUEUE_STATE_STACK_XOFF  (1 << __QUEUE_STATE_STACK_XOFF)
     564             : #define QUEUE_STATE_FROZEN      (1 << __QUEUE_STATE_FROZEN)
     565             : 
     566             : #define QUEUE_STATE_ANY_XOFF    (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
     567             : #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
     568             :                                         QUEUE_STATE_FROZEN)
     569             : #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
     570             :                                         QUEUE_STATE_FROZEN)
     571             : 
     572             : /*
     573             :  * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
     574             :  * netif_tx_* functions below are used to manipulate this flag.  The
     575             :  * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
     576             :  * queue independently.  The netif_xmit_*stopped functions below are called
      577             :  * to check if the queue has been stopped by the driver or the stack (either
      578             :  * of the XOFF bits is set in the state).  Drivers should not need to call
      579             :  * netif_xmit*stopped functions; they should only be using netif_tx_*.
     580             :  */
     581             : 
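                      : /* Editor's sketch (not part of netdevice.h): the driver-side flow
                      :  * control described above, using netif_stop_queue()/netif_wake_queue()
                      :  * declared further down this header; the my_*() helpers are
                      :  * hypothetical.
                      :  */
                      : static bool my_ring_full(struct net_device *dev);       /* hypothetical */
                      : static void my_post_to_ring(struct net_device *dev, struct sk_buff *skb);
                      : 
                      : static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
                      : {
                      :         my_post_to_ring(dev, skb);
                      :         if (my_ring_full(dev))
                      :                 netif_stop_queue(dev);   /* sets __QUEUE_STATE_DRV_XOFF */
                      :         return NETDEV_TX_OK;
                      : }
                      : 
                      : static void my_tx_completion(struct net_device *dev)
                      : {
                      :         if (netif_queue_stopped(dev) && !my_ring_full(dev))
                      :                 netif_wake_queue(dev);   /* clears XOFF, reschedules qdisc */
                      : }
                      : 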
     582             : struct netdev_queue {
     583             : /*
     584             :  * read-mostly part
     585             :  */
     586             :         struct net_device       *dev;
     587             :         struct Qdisc __rcu      *qdisc;
     588             :         struct Qdisc            *qdisc_sleeping;
     589             : #ifdef CONFIG_SYSFS
     590             :         struct kobject          kobj;
     591             : #endif
     592             : #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
     593             :         int                     numa_node;
     594             : #endif
     595             :         unsigned long           tx_maxrate;
     596             :         /*
     597             :          * Number of TX timeouts for this queue
     598             :          * (/sys/class/net/DEV/Q/trans_timeout)
     599             :          */
     600             :         unsigned long           trans_timeout;
     601             : 
     602             :         /* Subordinate device that the queue has been assigned to */
     603             :         struct net_device       *sb_dev;
     604             : #ifdef CONFIG_XDP_SOCKETS
     605             :         struct xsk_buff_pool    *pool;
     606             : #endif
     607             : /*
     608             :  * write-mostly part
     609             :  */
     610             :         spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
     611             :         int                     xmit_lock_owner;
     612             :         /*
     613             :          * Time (in jiffies) of last Tx
     614             :          */
     615             :         unsigned long           trans_start;
     616             : 
     617             :         unsigned long           state;
     618             : 
     619             : #ifdef CONFIG_BQL
     620             :         struct dql              dql;
     621             : #endif
     622             : } ____cacheline_aligned_in_smp;
     623             : 
     624             : extern int sysctl_fb_tunnels_only_for_init_net;
     625             : extern int sysctl_devconf_inherit_init_net;
     626             : 
     627             : /*
     628             :  * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
     629             :  *                                     == 1 : For initns only
     630             :  *                                     == 2 : For none.
     631             :  */
     632             : static inline bool net_has_fallback_tunnels(const struct net *net)
     633             : {
     634             :         return !IS_ENABLED(CONFIG_SYSCTL) ||
     635             :                !sysctl_fb_tunnels_only_for_init_net ||
     636             :                (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
     637             : }
     638             : 
     639           3 : static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
     640             : {
     641             : #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
     642           3 :         return q->numa_node;
     643             : #else
     644             :         return NUMA_NO_NODE;
     645             : #endif
     646             : }
     647             : 
     648           4 : static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
     649             : {
     650             : #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
     651           3 :         q->numa_node = node;
     652             : #endif
     653           1 : }
     654             : 
     655             : #ifdef CONFIG_RPS
     656             : /*
     657             :  * This structure holds an RPS map which can be of variable length.  The
     658             :  * map is an array of CPUs.
     659             :  */
     660             : struct rps_map {
     661             :         unsigned int len;
     662             :         struct rcu_head rcu;
     663             :         u16 cpus[];
     664             : };
     665             : #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
     666             : 
     667             : /*
     668             :  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
     669             :  * tail pointer for that CPU's input queue at the time of last enqueue, and
     670             :  * a hardware filter index.
     671             :  */
     672             : struct rps_dev_flow {
     673             :         u16 cpu;
     674             :         u16 filter;
     675             :         unsigned int last_qtail;
     676             : };
     677             : #define RPS_NO_FILTER 0xffff
     678             : 
     679             : /*
     680             :  * The rps_dev_flow_table structure contains a table of flow mappings.
     681             :  */
     682             : struct rps_dev_flow_table {
     683             :         unsigned int mask;
     684             :         struct rcu_head rcu;
     685             :         struct rps_dev_flow flows[];
     686             : };
     687             : #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
     688             :     ((_num) * sizeof(struct rps_dev_flow)))
     689             : 
     690             : /*
     691             :  * The rps_sock_flow_table contains mappings of flows to the last CPU
     692             :  * on which they were processed by the application (set in recvmsg).
      693             :  * Each entry is a 32-bit value. The upper part holds the high-order bits
      694             :  * of the flow hash, and the lower part is the CPU number.
     695             :  * rps_cpu_mask is used to partition the space, depending on number of
     696             :  * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
     697             :  * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
     698             :  * meaning we use 32-6=26 bits for the hash.
     699             :  */
     700             : struct rps_sock_flow_table {
     701             :         u32     mask;
     702             : 
     703             :         u32     ents[] ____cacheline_aligned_in_smp;
     704             : };
     705             : #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
     706             : 
     707             : #define RPS_NO_CPU 0xffff
     708             : 
     709             : extern u32 rps_cpu_mask;
     710             : extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
     711             : 
     712           0 : static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
     713             :                                         u32 hash)
     714             : {
     715           0 :         if (table && hash) {
     716           0 :                 unsigned int index = hash & table->mask;
     717           0 :                 u32 val = hash & ~rps_cpu_mask;
     718             : 
     719             :                 /* We only give a hint, preemption can change CPU under us */
     720           0 :                 val |= raw_smp_processor_id();
     721             : 
     722           0 :                 if (table->ents[index] != val)
     723           0 :                         table->ents[index] = val;
     724             :         }
     725           0 : }
     726             : 
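                      : /* Editor's sketch (not part of netdevice.h): this hint is recorded from
                      :  * the socket receive path; the real caller is sock_rps_record_flow() in
                      :  * <net/sock.h>, shown here only in outline.
                      :  */
                      : static inline void my_record_flow(u32 flow_hash)
                      : {
                      :         rcu_read_lock();
                      :         rps_record_sock_flow(rcu_dereference(rps_sock_flow_table),
                      :                              flow_hash);
                      :         rcu_read_unlock();
                      : }
                      : 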
     727             : #ifdef CONFIG_RFS_ACCEL
     728             : bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
     729             :                          u16 filter_id);
     730             : #endif
     731             : #endif /* CONFIG_RPS */
     732             : 
     733             : /* This structure contains an instance of an RX queue. */
     734             : struct netdev_rx_queue {
     735             : #ifdef CONFIG_RPS
     736             :         struct rps_map __rcu            *rps_map;
     737             :         struct rps_dev_flow_table __rcu *rps_flow_table;
     738             : #endif
     739             :         struct kobject                  kobj;
     740             :         struct net_device               *dev;
     741             :         struct xdp_rxq_info             xdp_rxq;
     742             : #ifdef CONFIG_XDP_SOCKETS
     743             :         struct xsk_buff_pool            *pool;
     744             : #endif
     745             : } ____cacheline_aligned_in_smp;
     746             : 
     747             : /*
     748             :  * RX queue sysfs structures and functions.
     749             :  */
     750             : struct rx_queue_attribute {
     751             :         struct attribute attr;
     752             :         ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
     753             :         ssize_t (*store)(struct netdev_rx_queue *queue,
     754             :                          const char *buf, size_t len);
     755             : };
     756             : 
     757             : #ifdef CONFIG_XPS
     758             : /*
     759             :  * This structure holds an XPS map which can be of variable length.  The
     760             :  * map is an array of queues.
     761             :  */
     762             : struct xps_map {
     763             :         unsigned int len;
     764             :         unsigned int alloc_len;
     765             :         struct rcu_head rcu;
     766             :         u16 queues[];
     767             : };
     768             : #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
     769             : #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
     770             :        - sizeof(struct xps_map)) / sizeof(u16))
     771             : 
     772             : /*
      773             :  * This structure holds all XPS maps for a device.  Maps are indexed by CPU.
     774             :  */
     775             : struct xps_dev_maps {
     776             :         struct rcu_head rcu;
     777             :         struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
     778             : };
     779             : 
     780             : #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +      \
     781             :         (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
     782             : 
     783             : #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
     784             :         (_rxqs * (_tcs) * sizeof(struct xps_map *)))
     785             : 
     786             : #endif /* CONFIG_XPS */
     787             : 
     788             : #define TC_MAX_QUEUE    16
     789             : #define TC_BITMASK      15
     790             : /* HW offloaded queuing disciplines txq count and offset maps */
     791             : struct netdev_tc_txq {
     792             :         u16 count;
     793             :         u16 offset;
     794             : };
     795             : 
     796             : #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
     797             : /*
     798             :  * This structure is to hold information about the device
      799             :  * configured to run the FCoE protocol stack.
     800             :  */
     801             : struct netdev_fcoe_hbainfo {
     802             :         char    manufacturer[64];
     803             :         char    serial_number[64];
     804             :         char    hardware_version[64];
     805             :         char    driver_version[64];
     806             :         char    optionrom_version[64];
     807             :         char    firmware_version[64];
     808             :         char    model[256];
     809             :         char    model_description[256];
     810             : };
     811             : #endif
     812             : 
     813             : #define MAX_PHYS_ITEM_ID_LEN 32
     814             : 
     815             : /* This structure holds a unique identifier to identify some
     816             :  * physical item (port for example) used by a netdevice.
     817             :  */
     818             : struct netdev_phys_item_id {
     819             :         unsigned char id[MAX_PHYS_ITEM_ID_LEN];
     820             :         unsigned char id_len;
     821             : };
     822             : 
     823           0 : static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
     824             :                                             struct netdev_phys_item_id *b)
     825             : {
     826           0 :         return a->id_len == b->id_len &&
     827           0 :                memcmp(a->id, b->id, a->id_len) == 0;
     828             : }
     829             : 
     830             : typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
     831             :                                        struct sk_buff *skb,
     832             :                                        struct net_device *sb_dev);
     833             : 
     834             : enum tc_setup_type {
     835             :         TC_SETUP_QDISC_MQPRIO,
     836             :         TC_SETUP_CLSU32,
     837             :         TC_SETUP_CLSFLOWER,
     838             :         TC_SETUP_CLSMATCHALL,
     839             :         TC_SETUP_CLSBPF,
     840             :         TC_SETUP_BLOCK,
     841             :         TC_SETUP_QDISC_CBS,
     842             :         TC_SETUP_QDISC_RED,
     843             :         TC_SETUP_QDISC_PRIO,
     844             :         TC_SETUP_QDISC_MQ,
     845             :         TC_SETUP_QDISC_ETF,
     846             :         TC_SETUP_ROOT_QDISC,
     847             :         TC_SETUP_QDISC_GRED,
     848             :         TC_SETUP_QDISC_TAPRIO,
     849             :         TC_SETUP_FT,
     850             :         TC_SETUP_QDISC_ETS,
     851             :         TC_SETUP_QDISC_TBF,
     852             :         TC_SETUP_QDISC_FIFO,
     853             :         TC_SETUP_QDISC_HTB,
     854             : };
     855             : 
     856             : /* These structures hold the attributes of bpf state that are being passed
     857             :  * to the netdevice through the bpf op.
     858             :  */
     859             : enum bpf_netdev_command {
     860             :         /* Set or clear a bpf program used in the earliest stages of packet
     861             :          * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
     862             :          * is responsible for calling bpf_prog_put on any old progs that are
     863             :          * stored. In case of error, the callee need not release the new prog
     864             :          * reference, but on success it takes ownership and must bpf_prog_put
     865             :          * when it is no longer used.
     866             :          */
     867             :         XDP_SETUP_PROG,
     868             :         XDP_SETUP_PROG_HW,
     869             :         /* BPF program for offload callbacks, invoked at program load time. */
     870             :         BPF_OFFLOAD_MAP_ALLOC,
     871             :         BPF_OFFLOAD_MAP_FREE,
     872             :         XDP_SETUP_XSK_POOL,
     873             : };
     874             : 
     875             : struct bpf_prog_offload_ops;
     876             : struct netlink_ext_ack;
     877             : struct xdp_umem;
     878             : struct xdp_dev_bulk_queue;
     879             : struct bpf_xdp_link;
     880             : 
     881             : enum bpf_xdp_mode {
     882             :         XDP_MODE_SKB = 0,
     883             :         XDP_MODE_DRV = 1,
     884             :         XDP_MODE_HW = 2,
     885             :         __MAX_XDP_MODE
     886             : };
     887             : 
     888             : struct bpf_xdp_entity {
     889             :         struct bpf_prog *prog;
     890             :         struct bpf_xdp_link *link;
     891             : };
     892             : 
     893             : struct netdev_bpf {
     894             :         enum bpf_netdev_command command;
     895             :         union {
     896             :                 /* XDP_SETUP_PROG */
     897             :                 struct {
     898             :                         u32 flags;
     899             :                         struct bpf_prog *prog;
     900             :                         struct netlink_ext_ack *extack;
     901             :                 };
     902             :                 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
     903             :                 struct {
     904             :                         struct bpf_offloaded_map *offmap;
     905             :                 };
     906             :                 /* XDP_SETUP_XSK_POOL */
     907             :                 struct {
     908             :                         struct xsk_buff_pool *pool;
     909             :                         u16 queue_id;
     910             :                 } xsk;
     911             :         };
     912             : };
     913             : 
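                      : /* Editor's sketch (not part of netdevice.h): a minimal .ndo_bpf honouring
                      :  * the XDP_SETUP_PROG ownership rule described above: keep the new prog
                      :  * reference, bpf_prog_put() the one it replaces. struct my_priv and its
                      :  * xdp_prog field are hypothetical; bpf_prog_put() is in <linux/bpf.h>.
                      :  */
                      : struct my_priv {
                      :         struct bpf_prog *xdp_prog;
                      : };
                      : 
                      : static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
                      : {
                      :         struct my_priv *priv = netdev_priv(dev);
                      :         struct bpf_prog *old;
                      : 
                      :         switch (bpf->command) {
                      :         case XDP_SETUP_PROG:
                      :                 old = priv->xdp_prog;
                      :                 priv->xdp_prog = bpf->prog;     /* may be NULL to clear */
                      :                 if (old)
                      :                         bpf_prog_put(old);      /* release the replaced prog */
                      :                 return 0;
                      :         default:
                      :                 return -EINVAL;
                      :         }
                      : }
                      : 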
     914             : /* Flags for ndo_xsk_wakeup. */
     915             : #define XDP_WAKEUP_RX (1 << 0)
     916             : #define XDP_WAKEUP_TX (1 << 1)
     917             : 
     918             : #ifdef CONFIG_XFRM_OFFLOAD
     919             : struct xfrmdev_ops {
     920             :         int     (*xdo_dev_state_add) (struct xfrm_state *x);
     921             :         void    (*xdo_dev_state_delete) (struct xfrm_state *x);
     922             :         void    (*xdo_dev_state_free) (struct xfrm_state *x);
     923             :         bool    (*xdo_dev_offload_ok) (struct sk_buff *skb,
     924             :                                        struct xfrm_state *x);
     925             :         void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
     926             : };
     927             : #endif
     928             : 
     929             : struct dev_ifalias {
     930             :         struct rcu_head rcuhead;
     931             :         char ifalias[];
     932             : };
     933             : 
     934             : struct devlink;
     935             : struct tlsdev_ops;
     936             : 
     937             : struct netdev_name_node {
     938             :         struct hlist_node hlist;
     939             :         struct list_head list;
     940             :         struct net_device *dev;
     941             :         const char *name;
     942             : };
     943             : 
     944             : int netdev_name_node_alt_create(struct net_device *dev, const char *name);
     945             : int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
     946             : 
     947             : struct netdev_net_notifier {
     948             :         struct list_head list;
     949             :         struct notifier_block *nb;
     950             : };
     951             : 
     952             : /*
     953             :  * This structure defines the management hooks for network devices.
     954             :  * The following hooks can be defined; unless noted otherwise, they are
     955             :  * optional and can be filled with a null pointer.
     956             :  *
     957             :  * int (*ndo_init)(struct net_device *dev);
     958             :  *     This function is called once when a network device is registered.
     959             :  *     The network device can use this for any late stage initialization
     960             :  *     or semantic validation. It can fail with an error code which will
     961             :  *     be propagated back to register_netdev.
     962             :  *
     963             :  * void (*ndo_uninit)(struct net_device *dev);
     964             :  *     This function is called when device is unregistered or when registration
     965             :  *     fails. It is not called if init fails.
     966             :  *
     967             :  * int (*ndo_open)(struct net_device *dev);
     968             :  *     This function is called when a network device transitions to the up
     969             :  *     state.
     970             :  *
     971             :  * int (*ndo_stop)(struct net_device *dev);
     972             :  *     This function is called when a network device transitions to the down
     973             :  *     state.
     974             :  *
     975             :  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
     976             :  *                               struct net_device *dev);
     977             :  *      Called when a packet needs to be transmitted.
     978             :  *      Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
     979             :  *      the queue before that can happen; it's for obsolete devices and weird
     980             :  *      corner cases, but the stack really does a non-trivial amount
     981             :  *      of useless work if you return NETDEV_TX_BUSY.
     982             :  *      Required; cannot be NULL.
     983             :  *
     984             :  * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
      985             :  *                                         struct net_device *dev,
     986             :  *                                         netdev_features_t features);
     987             :  *      Called by core transmit path to determine if device is capable of
     988             :  *      performing offload operations on a given packet. This is to give
     989             :  *      the device an opportunity to implement any restrictions that cannot
     990             :  *      be otherwise expressed by feature flags. The check is called with
     991             :  *      the set of features that the stack has calculated and it returns
     992             :  *      those the driver believes to be appropriate.
     993             :  *
     994             :  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
     995             :  *                         struct net_device *sb_dev);
     996             :  *      Called to decide which queue to use when device supports multiple
     997             :  *      transmit queues.
     998             :  *
     999             :  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
    1000             :  *      This function is called to allow device receiver to make
    1001             :  *      changes to configuration when multicast or promiscuous is enabled.
    1002             :  *
    1003             :  * void (*ndo_set_rx_mode)(struct net_device *dev);
     1004             :  *      This function is called when the device changes address list filtering.
    1005             :  *      If driver handles unicast address filtering, it should set
    1006             :  *      IFF_UNICAST_FLT in its priv_flags.
    1007             :  *
    1008             :  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
     1009             :  *      This function is called when the Media Access Control address
     1010             :  *      needs to be changed. If this interface is not defined, the
     1011             :  *      MAC address cannot be changed.
    1012             :  *
    1013             :  * int (*ndo_validate_addr)(struct net_device *dev);
    1014             :  *      Test if Media Access Control address is valid for the device.
    1015             :  *
    1016             :  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
     1017             :  *      Called when a user requests an ioctl which can't be handled by
     1018             :  *      the generic interface code. If not defined, such ioctls return
     1019             :  *      a not-supported error code.
    1020             :  *
    1021             :  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
     1022             :  *      Used to set a network device's bus interface parameters. This interface
    1023             :  *      is retained for legacy reasons; new devices should use the bus
    1024             :  *      interface (PCI) for low level management.
    1025             :  *
    1026             :  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
     1027             :  *      Called when a user wants to change the Maximum Transmission Unit
    1028             :  *      of a device.
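                      :  *
                      :  *      The core validates new_mtu against dev->min_mtu/dev->max_mtu
                      :  *      before calling this, so a simple driver (foo_hw_set_mtu() is
                      :  *      hypothetical) only reprograms the hardware and commits the value:
                      :  *
                      :  *          static int foo_change_mtu(struct net_device *dev, int new_mtu)
                      :  *          {
                      :  *                  int err = foo_hw_set_mtu(dev, new_mtu);
                      :  *
                      :  *                  if (err)
                      :  *                          return err;
                      :  *                  dev->mtu = new_mtu;
                      :  *                  return 0;
                      :  *          }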
    1029             :  *
    1030             :  * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
    1031             :  *      Callback used when the transmitter has not made any progress
     1032             :  *      for dev->watchdog_timeo ticks.
    1033             :  *
    1034             :  * void (*ndo_get_stats64)(struct net_device *dev,
    1035             :  *                         struct rtnl_link_stats64 *storage);
    1036             :  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
    1037             :  *      Called when a user wants to get the network device usage
    1038             :  *      statistics. Drivers must do one of the following:
     1039             :  *      1. Define @ndo_get_stats64 to fill in a zero-initialised
     1040             :  *         rtnl_link_stats64 structure passed by the caller (sketched below).
    1041             :  *      2. Define @ndo_get_stats to update a net_device_stats structure
    1042             :  *         (which should normally be dev->stats) and return a pointer to
    1043             :  *         it. The structure may be changed asynchronously only if each
    1044             :  *         field is written atomically.
    1045             :  *      3. Update dev->stats asynchronously and atomically, and define
    1046             :  *         neither operation.
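                      :  *
                      :  *      A sketch of option 1, assuming a hypothetical foo_priv that
                      :  *      tracks totals; the structure arrives zero-initialised, so only
                      :  *      the counters the driver maintains need to be written:
                      :  *
                      :  *          static void foo_get_stats64(struct net_device *dev,
                      :  *                                      struct rtnl_link_stats64 *storage)
                      :  *          {
                      :  *                  struct foo_priv *priv = netdev_priv(dev);
                      :  *
                      :  *                  storage->rx_packets = priv->rx_packets;
                      :  *                  storage->tx_packets = priv->tx_packets;
                      :  *                  storage->rx_bytes = priv->rx_bytes;
                      :  *                  storage->tx_bytes = priv->tx_bytes;
                      :  *          }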
    1047             :  *
    1048             :  * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
    1049             :  *      Return true if this device supports offload stats of this attr_id.
    1050             :  *
    1051             :  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
    1052             :  *      void *attr_data)
     1053             :  *      Get statistics for offload operations by attr_id. Write them into
     1054             :  *      the attr_data pointer.
    1055             :  *
    1056             :  * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
     1057             :  *      If the device supports VLAN filtering, this function is called when
     1058             :  *      a VLAN id is registered.
    1059             :  *
    1060             :  * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
     1061             :  *      If the device supports VLAN filtering, this function is called when
     1062             :  *      a VLAN id is unregistered.
    1063             :  *
    1064             :  * void (*ndo_poll_controller)(struct net_device *dev);
    1065             :  *
    1066             :  *      SR-IOV management functions.
    1067             :  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
    1068             :  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
    1069             :  *                        u8 qos, __be16 proto);
    1070             :  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
    1071             :  *                        int max_tx_rate);
    1072             :  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
    1073             :  * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
    1074             :  * int (*ndo_get_vf_config)(struct net_device *dev,
    1075             :  *                          int vf, struct ifla_vf_info *ivf);
    1076             :  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
    1077             :  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
    1078             :  *                        struct nlattr *port[]);
    1079             :  *
    1080             :  *      Enable or disable the VF ability to query its RSS Redirection Table and
     1081             :  *      Hash Key. This is needed since on some devices VFs share this
     1082             :  *      information with the PF and querying it may introduce a theoretical security risk.
    1083             :  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
    1084             :  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
    1085             :  * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
    1086             :  *                     void *type_data);
    1087             :  *      Called to setup any 'tc' scheduler, classifier or action on @dev.
    1088             :  *      This is always called from the stack with the rtnl lock held and netif
    1089             :  *      tx queues stopped. This allows the netdevice to perform queue
    1090             :  *      management safely.
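                      :  *
                      :  *      Drivers typically dispatch on @type and reject everything they
                      :  *      do not support; a sketch (foo_setup_mqprio() is hypothetical):
                      :  *
                      :  *          static int foo_setup_tc(struct net_device *dev,
                      :  *                                  enum tc_setup_type type,
                      :  *                                  void *type_data)
                      :  *          {
                      :  *                  switch (type) {
                      :  *                  case TC_SETUP_QDISC_MQPRIO:
                      :  *                          return foo_setup_mqprio(dev, type_data);
                      :  *                  default:
                      :  *                          return -EOPNOTSUPP;
                      :  *                  }
                      :  *          }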
    1091             :  *
     1092             :  *      Fibre Channel over Ethernet (FCoE) offload functions.
    1093             :  * int (*ndo_fcoe_enable)(struct net_device *dev);
    1094             :  *      Called when the FCoE protocol stack wants to start using LLD for FCoE
     1095             :  *      so the underlying device can perform whatever configuration or
     1096             :  *      initialization is needed to support acceleration of FCoE traffic.
    1097             :  *
    1098             :  * int (*ndo_fcoe_disable)(struct net_device *dev);
    1099             :  *      Called when the FCoE protocol stack wants to stop using LLD for FCoE
     1100             :  *      so the underlying device can perform whatever clean-ups are
     1101             :  *      needed to stop supporting acceleration of FCoE traffic.
    1102             :  *
    1103             :  * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
    1104             :  *                           struct scatterlist *sgl, unsigned int sgc);
    1105             :  *      Called when the FCoE Initiator wants to initialize an I/O that
     1106             :  *      is a possible candidate for Direct Data Placement (DDP). The LLD can
     1107             :  *      perform the necessary setup and return 1 to indicate the device is set
     1108             :  *      up successfully to perform DDP on this I/O; otherwise it returns 0.
    1109             :  *
     1110             :  * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
    1111             :  *      Called when the FCoE Initiator/Target is done with the DDPed I/O as
    1112             :  *      indicated by the FC exchange id 'xid', so the underlying device can
    1113             :  *      clean up and reuse resources for later DDP requests.
    1114             :  *
    1115             :  * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
    1116             :  *                            struct scatterlist *sgl, unsigned int sgc);
    1117             :  *      Called when the FCoE Target wants to initialize an I/O that
     1118             :  *      is a possible candidate for Direct Data Placement (DDP). The LLD can
     1119             :  *      perform the necessary setup and return 1 to indicate the device is set
     1120             :  *      up successfully to perform DDP on this I/O; otherwise it returns 0.
    1121             :  *
    1122             :  * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
    1123             :  *                             struct netdev_fcoe_hbainfo *hbainfo);
    1124             :  *      Called when the FCoE Protocol stack wants information on the underlying
    1125             :  *      device. This information is utilized by the FCoE protocol stack to
     1126             :  *      register attributes with the Fibre Channel management service as per
     1127             :  *      the FC-GS Fabric Device Management Information (FDMI) specification.
    1128             :  *
    1129             :  * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
     1130             :  *      Called when the underlying device wants to override the default World
     1131             :  *      Wide Name (WWN) generation mechanism in the FCoE protocol stack to
     1132             :  *      pass its own World Wide Port Name (WWPN) or World Wide Node Name
     1133             :  *      (WWNN) for the FCoE protocol stack to use.
    1134             :  *
    1135             :  *      RFS acceleration.
    1136             :  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
    1137             :  *                          u16 rxq_index, u32 flow_id);
    1138             :  *      Set hardware filter for RFS.  rxq_index is the target queue index;
    1139             :  *      flow_id is a flow ID to be passed to rps_may_expire_flow() later.
    1140             :  *      Return the filter ID on success, or a negative error code.
    1141             :  *
    1142             :  *      Slave management functions (for bridge, bonding, etc).
     1143             :  * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev,
                      :  *                      struct netlink_ext_ack *extack);
    1144             :  *      Called to make another netdev an underling.
    1145             :  *
    1146             :  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
    1147             :  *      Called to release previously enslaved netdev.
    1148             :  *
    1149             :  * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
    1150             :  *                                          struct sk_buff *skb,
    1151             :  *                                          bool all_slaves);
     1152             :  *      Get the xmit slave of the master device. If all_slaves is true, the
     1153             :  *      function assumes all the slaves can transmit.
    1154             :  *
    1155             :  *      Feature/offload setting functions.
    1156             :  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
    1157             :  *              netdev_features_t features);
    1158             :  *      Adjusts the requested feature flags according to device-specific
    1159             :  *      constraints, and returns the resulting flags. Must not modify
    1160             :  *      the device state.
    1161             :  *
    1162             :  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
     1163             :  *      Called to update device configuration to new features. The passed
     1164             :  *      feature set might be less than what was returned by ndo_fix_features().
     1165             :  *      Must return >0 or -errno if it changed dev->features itself.
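                      :  *
                      :  *      A sketch of expressing a device-specific dependency in
                      :  *      ndo_fix_features() (the SG/checksum coupling here is a
                      :  *      hypothetical constraint, not a rule of this header):
                      :  *
                      :  *          static netdev_features_t foo_fix_features(struct net_device *dev,
                      :  *                                                     netdev_features_t features)
                      :  *          {
                      :  *                  if (!(features & NETIF_F_SG))
                      :  *                          features &= ~NETIF_F_HW_CSUM;
                      :  *                  return features;
                      :  *          }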
    1166             :  *
    1167             :  * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
    1168             :  *                    struct net_device *dev,
    1169             :  *                    const unsigned char *addr, u16 vid, u16 flags,
    1170             :  *                    struct netlink_ext_ack *extack);
    1171             :  *      Adds an FDB entry to dev for addr.
    1172             :  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
    1173             :  *                    struct net_device *dev,
    1174             :  *                    const unsigned char *addr, u16 vid)
     1175             :  *      Deletes the FDB entry from dev corresponding to addr.
    1176             :  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
    1177             :  *                     struct net_device *dev, struct net_device *filter_dev,
    1178             :  *                     int *idx)
    1179             :  *      Used to add FDB entries to dump requests. Implementers should add
    1180             :  *      entries to skb and update idx with the number of entries.
    1181             :  *
    1182             :  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
    1183             :  *                           u16 flags, struct netlink_ext_ack *extack)
    1184             :  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
    1185             :  *                           struct net_device *dev, u32 filter_mask,
    1186             :  *                           int nlflags)
    1187             :  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
    1188             :  *                           u16 flags);
    1189             :  *
    1190             :  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
    1191             :  *      Called to change device carrier. Soft-devices (like dummy, team, etc)
    1192             :  *      which do not represent real hardware may define this to allow their
    1193             :  *      userspace components to manage their virtual carrier state. Devices
     1194             :  *      that determine carrier state from physical hardware properties (e.g.
     1195             :  *      network cables) or protocol-dependent mechanisms (e.g.
     1196             :  *      USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
    1197             :  *
    1198             :  * int (*ndo_get_phys_port_id)(struct net_device *dev,
    1199             :  *                             struct netdev_phys_item_id *ppid);
     1200             :  *      Called to get the ID of the physical port of this device. If the
     1201             :  *      driver does not implement this, it is assumed that the hw is not
     1202             :  *      able to have multiple net devices on a single physical port.
    1203             :  *
    1204             :  * int (*ndo_get_port_parent_id)(struct net_device *dev,
    1205             :  *                               struct netdev_phys_item_id *ppid)
    1206             :  *      Called to get the parent ID of the physical port of this device.
    1207             :  *
    1208             :  * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
    1209             :  *                               struct net_device *dev)
    1210             :  *      Called by upper layer devices to accelerate switching or other
     1211             :  *      station functionality into hardware. 'pdev' is the lowerdev
    1212             :  *      to use for the offload and 'dev' is the net device that will
    1213             :  *      back the offload. Returns a pointer to the private structure
    1214             :  *      the upper layer will maintain.
    1215             :  * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
    1216             :  *      Called by upper layer device to delete the station created
    1217             :  *      by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
    1218             :  *      the station and priv is the structure returned by the add
    1219             :  *      operation.
    1220             :  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
    1221             :  *                           int queue_index, u32 maxrate);
     1222             :  *      Called when a user wants to set a max-rate limitation on a specific
     1223             :  *      TX queue.
    1224             :  * int (*ndo_get_iflink)(const struct net_device *dev);
    1225             :  *      Called to get the iflink value of this device.
     1226             :  * int (*ndo_change_proto_down)(struct net_device *dev,
    1227             :  *                               bool proto_down);
    1228             :  *      This function is used to pass protocol port error state information
    1229             :  *      to the switch driver. The switch driver can react to the proto_down
    1230             :  *      by doing a phys down on the associated switch port.
    1231             :  * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
    1232             :  *      This function is used to get egress tunnel information for given skb.
     1233             :  *      This is useful for retrieving outer tunnel header parameters while
     1234             :  *      sampling packets.
    1235             :  * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
     1236             :  *      This function is used to specify the headroom to reserve when
     1237             :  *      allocating an skb during packet reception. Setting an
     1238             :  *      appropriate rx headroom value avoids an skb head copy on
     1239             :  *      forward. Setting a negative value resets the rx headroom to the
     1240             :  *      default value.
    1241             :  * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
    1242             :  *      This function is used to set or query state related to XDP on the
    1243             :  *      netdevice and manage BPF offload. See definition of
    1244             :  *      enum bpf_netdev_command for details.
    1245             :  * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
    1246             :  *                      u32 flags);
     1247             :  *      This function is used to submit @n XDP packets for transmit on a
     1248             :  *      netdevice. Returns the number of frames successfully transmitted;
     1249             :  *      frames that got dropped are freed/returned via xdp_return_frame().
     1250             :  *      A negative return value indicates a general error invoking the ndo:
     1251             :  *      no frames were transmitted and the caller will free all frames.
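                      :  *
                      :  *      A sketch of the documented convention (foo_hw_xmit_frame() is
                      :  *      hypothetical): dropped frames are handed back via
                      :  *      xdp_return_frame() and the count of queued frames is returned:
                      :  *
                      :  *          static int foo_xdp_xmit(struct net_device *dev, int n,
                      :  *                                  struct xdp_frame **frames, u32 flags)
                      :  *          {
                      :  *                  int i, sent = 0;
                      :  *
                      :  *                  for (i = 0; i < n; i++) {
                      :  *                          if (foo_hw_xmit_frame(dev, frames[i]))
                      :  *                                  xdp_return_frame(frames[i]);
                      :  *                          else
                      :  *                                  sent++;
                      :  *                  }
                      :  *                  return sent;
                      :  *          }
                      :  *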
    1252             :  * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
    1253             :  *      This function is used to wake up the softirq, ksoftirqd or kthread
    1254             :  *      responsible for sending and/or receiving packets on a specific
    1255             :  *      queue id bound to an AF_XDP socket. The flags field specifies if
     1256             :  *      only RX, only TX, or both should be woken up using the flags
    1257             :  *      XDP_WAKEUP_RX and XDP_WAKEUP_TX.
    1258             :  * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
    1259             :  *      Get devlink port instance associated with a given netdev.
    1260             :  *      Called with a reference on the netdevice and devlink locks only,
    1261             :  *      rtnl_lock is not held.
    1262             :  * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
    1263             :  *                       int cmd);
    1264             :  *      Add, change, delete or get information on an IPv4 tunnel.
    1265             :  * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
    1266             :  *      If a device is paired with a peer device, return the peer instance.
    1267             :  *      The caller must be under RCU read context.
    1268             :  */
    1269             : struct net_device_ops {
    1270             :         int                     (*ndo_init)(struct net_device *dev);
    1271             :         void                    (*ndo_uninit)(struct net_device *dev);
    1272             :         int                     (*ndo_open)(struct net_device *dev);
    1273             :         int                     (*ndo_stop)(struct net_device *dev);
    1274             :         netdev_tx_t             (*ndo_start_xmit)(struct sk_buff *skb,
    1275             :                                                   struct net_device *dev);
    1276             :         netdev_features_t       (*ndo_features_check)(struct sk_buff *skb,
    1277             :                                                       struct net_device *dev,
    1278             :                                                       netdev_features_t features);
    1279             :         u16                     (*ndo_select_queue)(struct net_device *dev,
    1280             :                                                     struct sk_buff *skb,
    1281             :                                                     struct net_device *sb_dev);
    1282             :         void                    (*ndo_change_rx_flags)(struct net_device *dev,
    1283             :                                                        int flags);
    1284             :         void                    (*ndo_set_rx_mode)(struct net_device *dev);
    1285             :         int                     (*ndo_set_mac_address)(struct net_device *dev,
    1286             :                                                        void *addr);
    1287             :         int                     (*ndo_validate_addr)(struct net_device *dev);
    1288             :         int                     (*ndo_do_ioctl)(struct net_device *dev,
    1289             :                                                 struct ifreq *ifr, int cmd);
    1290             :         int                     (*ndo_set_config)(struct net_device *dev,
    1291             :                                                   struct ifmap *map);
    1292             :         int                     (*ndo_change_mtu)(struct net_device *dev,
    1293             :                                                   int new_mtu);
    1294             :         int                     (*ndo_neigh_setup)(struct net_device *dev,
    1295             :                                                    struct neigh_parms *);
    1296             :         void                    (*ndo_tx_timeout) (struct net_device *dev,
    1297             :                                                    unsigned int txqueue);
    1298             : 
    1299             :         void                    (*ndo_get_stats64)(struct net_device *dev,
    1300             :                                                    struct rtnl_link_stats64 *storage);
    1301             :         bool                    (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
    1302             :         int                     (*ndo_get_offload_stats)(int attr_id,
    1303             :                                                          const struct net_device *dev,
    1304             :                                                          void *attr_data);
    1305             :         struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
    1306             : 
    1307             :         int                     (*ndo_vlan_rx_add_vid)(struct net_device *dev,
    1308             :                                                        __be16 proto, u16 vid);
    1309             :         int                     (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
    1310             :                                                         __be16 proto, u16 vid);
    1311             : #ifdef CONFIG_NET_POLL_CONTROLLER
    1312             :         void                    (*ndo_poll_controller)(struct net_device *dev);
    1313             :         int                     (*ndo_netpoll_setup)(struct net_device *dev,
    1314             :                                                      struct netpoll_info *info);
    1315             :         void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
    1316             : #endif
    1317             :         int                     (*ndo_set_vf_mac)(struct net_device *dev,
    1318             :                                                   int queue, u8 *mac);
    1319             :         int                     (*ndo_set_vf_vlan)(struct net_device *dev,
    1320             :                                                    int queue, u16 vlan,
    1321             :                                                    u8 qos, __be16 proto);
    1322             :         int                     (*ndo_set_vf_rate)(struct net_device *dev,
    1323             :                                                    int vf, int min_tx_rate,
    1324             :                                                    int max_tx_rate);
    1325             :         int                     (*ndo_set_vf_spoofchk)(struct net_device *dev,
    1326             :                                                        int vf, bool setting);
    1327             :         int                     (*ndo_set_vf_trust)(struct net_device *dev,
    1328             :                                                     int vf, bool setting);
    1329             :         int                     (*ndo_get_vf_config)(struct net_device *dev,
    1330             :                                                      int vf,
    1331             :                                                      struct ifla_vf_info *ivf);
    1332             :         int                     (*ndo_set_vf_link_state)(struct net_device *dev,
    1333             :                                                          int vf, int link_state);
    1334             :         int                     (*ndo_get_vf_stats)(struct net_device *dev,
    1335             :                                                     int vf,
    1336             :                                                     struct ifla_vf_stats
    1337             :                                                     *vf_stats);
    1338             :         int                     (*ndo_set_vf_port)(struct net_device *dev,
    1339             :                                                    int vf,
    1340             :                                                    struct nlattr *port[]);
    1341             :         int                     (*ndo_get_vf_port)(struct net_device *dev,
    1342             :                                                    int vf, struct sk_buff *skb);
    1343             :         int                     (*ndo_get_vf_guid)(struct net_device *dev,
    1344             :                                                    int vf,
    1345             :                                                    struct ifla_vf_guid *node_guid,
    1346             :                                                    struct ifla_vf_guid *port_guid);
    1347             :         int                     (*ndo_set_vf_guid)(struct net_device *dev,
    1348             :                                                    int vf, u64 guid,
    1349             :                                                    int guid_type);
    1350             :         int                     (*ndo_set_vf_rss_query_en)(
    1351             :                                                    struct net_device *dev,
    1352             :                                                    int vf, bool setting);
    1353             :         int                     (*ndo_setup_tc)(struct net_device *dev,
    1354             :                                                 enum tc_setup_type type,
    1355             :                                                 void *type_data);
    1356             : #if IS_ENABLED(CONFIG_FCOE)
    1357             :         int                     (*ndo_fcoe_enable)(struct net_device *dev);
    1358             :         int                     (*ndo_fcoe_disable)(struct net_device *dev);
    1359             :         int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
    1360             :                                                       u16 xid,
    1361             :                                                       struct scatterlist *sgl,
    1362             :                                                       unsigned int sgc);
    1363             :         int                     (*ndo_fcoe_ddp_done)(struct net_device *dev,
    1364             :                                                      u16 xid);
    1365             :         int                     (*ndo_fcoe_ddp_target)(struct net_device *dev,
    1366             :                                                        u16 xid,
    1367             :                                                        struct scatterlist *sgl,
    1368             :                                                        unsigned int sgc);
    1369             :         int                     (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
    1370             :                                                         struct netdev_fcoe_hbainfo *hbainfo);
    1371             : #endif
    1372             : 
    1373             : #if IS_ENABLED(CONFIG_LIBFCOE)
    1374             : #define NETDEV_FCOE_WWNN 0
    1375             : #define NETDEV_FCOE_WWPN 1
    1376             :         int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
    1377             :                                                     u64 *wwn, int type);
    1378             : #endif
    1379             : 
    1380             : #ifdef CONFIG_RFS_ACCEL
    1381             :         int                     (*ndo_rx_flow_steer)(struct net_device *dev,
    1382             :                                                      const struct sk_buff *skb,
    1383             :                                                      u16 rxq_index,
    1384             :                                                      u32 flow_id);
    1385             : #endif
    1386             :         int                     (*ndo_add_slave)(struct net_device *dev,
    1387             :                                                  struct net_device *slave_dev,
    1388             :                                                  struct netlink_ext_ack *extack);
    1389             :         int                     (*ndo_del_slave)(struct net_device *dev,
    1390             :                                                  struct net_device *slave_dev);
    1391             :         struct net_device*      (*ndo_get_xmit_slave)(struct net_device *dev,
    1392             :                                                       struct sk_buff *skb,
    1393             :                                                       bool all_slaves);
    1394             :         struct net_device*      (*ndo_sk_get_lower_dev)(struct net_device *dev,
    1395             :                                                         struct sock *sk);
    1396             :         netdev_features_t       (*ndo_fix_features)(struct net_device *dev,
    1397             :                                                     netdev_features_t features);
    1398             :         int                     (*ndo_set_features)(struct net_device *dev,
    1399             :                                                     netdev_features_t features);
    1400             :         int                     (*ndo_neigh_construct)(struct net_device *dev,
    1401             :                                                        struct neighbour *n);
    1402             :         void                    (*ndo_neigh_destroy)(struct net_device *dev,
    1403             :                                                      struct neighbour *n);
    1404             : 
    1405             :         int                     (*ndo_fdb_add)(struct ndmsg *ndm,
    1406             :                                                struct nlattr *tb[],
    1407             :                                                struct net_device *dev,
    1408             :                                                const unsigned char *addr,
    1409             :                                                u16 vid,
    1410             :                                                u16 flags,
    1411             :                                                struct netlink_ext_ack *extack);
    1412             :         int                     (*ndo_fdb_del)(struct ndmsg *ndm,
    1413             :                                                struct nlattr *tb[],
    1414             :                                                struct net_device *dev,
    1415             :                                                const unsigned char *addr,
    1416             :                                                u16 vid);
    1417             :         int                     (*ndo_fdb_dump)(struct sk_buff *skb,
    1418             :                                                 struct netlink_callback *cb,
    1419             :                                                 struct net_device *dev,
    1420             :                                                 struct net_device *filter_dev,
    1421             :                                                 int *idx);
    1422             :         int                     (*ndo_fdb_get)(struct sk_buff *skb,
    1423             :                                                struct nlattr *tb[],
    1424             :                                                struct net_device *dev,
    1425             :                                                const unsigned char *addr,
    1426             :                                                u16 vid, u32 portid, u32 seq,
    1427             :                                                struct netlink_ext_ack *extack);
    1428             :         int                     (*ndo_bridge_setlink)(struct net_device *dev,
    1429             :                                                       struct nlmsghdr *nlh,
    1430             :                                                       u16 flags,
    1431             :                                                       struct netlink_ext_ack *extack);
    1432             :         int                     (*ndo_bridge_getlink)(struct sk_buff *skb,
    1433             :                                                       u32 pid, u32 seq,
    1434             :                                                       struct net_device *dev,
    1435             :                                                       u32 filter_mask,
    1436             :                                                       int nlflags);
    1437             :         int                     (*ndo_bridge_dellink)(struct net_device *dev,
    1438             :                                                       struct nlmsghdr *nlh,
    1439             :                                                       u16 flags);
    1440             :         int                     (*ndo_change_carrier)(struct net_device *dev,
    1441             :                                                       bool new_carrier);
    1442             :         int                     (*ndo_get_phys_port_id)(struct net_device *dev,
    1443             :                                                         struct netdev_phys_item_id *ppid);
    1444             :         int                     (*ndo_get_port_parent_id)(struct net_device *dev,
    1445             :                                                           struct netdev_phys_item_id *ppid);
    1446             :         int                     (*ndo_get_phys_port_name)(struct net_device *dev,
    1447             :                                                           char *name, size_t len);
    1448             :         void*                   (*ndo_dfwd_add_station)(struct net_device *pdev,
    1449             :                                                         struct net_device *dev);
    1450             :         void                    (*ndo_dfwd_del_station)(struct net_device *pdev,
    1451             :                                                         void *priv);
    1452             : 
    1453             :         int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
    1454             :                                                       int queue_index,
    1455             :                                                       u32 maxrate);
    1456             :         int                     (*ndo_get_iflink)(const struct net_device *dev);
    1457             :         int                     (*ndo_change_proto_down)(struct net_device *dev,
    1458             :                                                          bool proto_down);
    1459             :         int                     (*ndo_fill_metadata_dst)(struct net_device *dev,
    1460             :                                                        struct sk_buff *skb);
    1461             :         void                    (*ndo_set_rx_headroom)(struct net_device *dev,
    1462             :                                                        int needed_headroom);
    1463             :         int                     (*ndo_bpf)(struct net_device *dev,
    1464             :                                            struct netdev_bpf *bpf);
    1465             :         int                     (*ndo_xdp_xmit)(struct net_device *dev, int n,
    1466             :                                                 struct xdp_frame **xdp,
    1467             :                                                 u32 flags);
    1468             :         int                     (*ndo_xsk_wakeup)(struct net_device *dev,
    1469             :                                                   u32 queue_id, u32 flags);
    1470             :         struct devlink_port *   (*ndo_get_devlink_port)(struct net_device *dev);
    1471             :         int                     (*ndo_tunnel_ctl)(struct net_device *dev,
    1472             :                                                   struct ip_tunnel_parm *p, int cmd);
    1473             :         struct net_device *     (*ndo_get_peer_dev)(struct net_device *dev);
    1474             : };
    1475             : 
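                      : /*
                      :  * A minimal, hypothetical ops table wiring up the required
                      :  * ndo_start_xmit plus a few common callbacks. eth_mac_addr() and
                      :  * eth_validate_addr() are the stock Ethernet helpers from
                      :  * <linux/etherdevice.h>; the foo_* functions are illustrative
                      :  * assumptions only. A driver installs the table once at setup time
                      :  * with dev->netdev_ops = &foo_netdev_ops;.
                      :  */
                      : static const struct net_device_ops foo_netdev_ops = {
                      :         .ndo_open               = foo_open,
                      :         .ndo_stop               = foo_stop,
                      :         .ndo_start_xmit         = foo_start_xmit,
                      :         .ndo_set_rx_mode        = foo_set_rx_mode,
                      :         .ndo_set_mac_address    = eth_mac_addr,
                      :         .ndo_validate_addr      = eth_validate_addr,
                      :         .ndo_get_stats64        = foo_get_stats64,
                      : };
                      : 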
    1476             : /**
    1477             :  * enum netdev_priv_flags - &struct net_device priv_flags
    1478             :  *
     1479             :  * These are the &struct net_device priv_flags; they are only set
     1480             :  * internally by drivers and used in the kernel. These flags are
     1481             :  * invisible to userspace; this means that the order of these flags
     1482             :  * can change during any kernel release.
    1483             :  *
    1484             :  * You should have a pretty good reason to be extending these flags.
    1485             :  *
    1486             :  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
    1487             :  * @IFF_EBRIDGE: Ethernet bridging device
    1488             :  * @IFF_BONDING: bonding master or slave
    1489             :  * @IFF_ISATAP: ISATAP interface (RFC4214)
    1490             :  * @IFF_WAN_HDLC: WAN HDLC device
    1491             :  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
    1492             :  *      release skb->dst
    1493             :  * @IFF_DONT_BRIDGE: disallow bridging this ether dev
    1494             :  * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
    1495             :  * @IFF_MACVLAN_PORT: device used as macvlan port
    1496             :  * @IFF_BRIDGE_PORT: device used as bridge port
    1497             :  * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
    1498             :  * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
    1499             :  * @IFF_UNICAST_FLT: Supports unicast filtering
    1500             :  * @IFF_TEAM_PORT: device used as team port
    1501             :  * @IFF_SUPP_NOFCS: device supports sending custom FCS
    1502             :  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
    1503             :  *      change when it's running
    1504             :  * @IFF_MACVLAN: Macvlan device
    1505             :  * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
    1506             :  *      underlying stacked devices
    1507             :  * @IFF_L3MDEV_MASTER: device is an L3 master device
    1508             :  * @IFF_NO_QUEUE: device can run without qdisc attached
    1509             :  * @IFF_OPENVSWITCH: device is a Open vSwitch master
    1510             :  * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
    1511             :  * @IFF_TEAM: device is a team device
    1512             :  * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
    1513             :  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
    1514             :  *      entity (i.e. the master device for bridged veth)
    1515             :  * @IFF_MACSEC: device is a MACsec device
    1516             :  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
    1517             :  * @IFF_FAILOVER: device is a failover master device
    1518             :  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
    1519             :  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
    1520             :  * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
    1521             :  */
    1522             : enum netdev_priv_flags {
    1523             :         IFF_802_1Q_VLAN                 = 1<<0,
    1524             :         IFF_EBRIDGE                     = 1<<1,
    1525             :         IFF_BONDING                     = 1<<2,
    1526             :         IFF_ISATAP                      = 1<<3,
    1527             :         IFF_WAN_HDLC                    = 1<<4,
    1528             :         IFF_XMIT_DST_RELEASE            = 1<<5,
    1529             :         IFF_DONT_BRIDGE                 = 1<<6,
    1530             :         IFF_DISABLE_NETPOLL             = 1<<7,
    1531             :         IFF_MACVLAN_PORT                = 1<<8,
    1532             :         IFF_BRIDGE_PORT                 = 1<<9,
    1533             :         IFF_OVS_DATAPATH                = 1<<10,
    1534             :         IFF_TX_SKB_SHARING              = 1<<11,
    1535             :         IFF_UNICAST_FLT                 = 1<<12,
    1536             :         IFF_TEAM_PORT                   = 1<<13,
    1537             :         IFF_SUPP_NOFCS                  = 1<<14,
    1538             :         IFF_LIVE_ADDR_CHANGE            = 1<<15,
    1539             :         IFF_MACVLAN                     = 1<<16,
    1540             :         IFF_XMIT_DST_RELEASE_PERM       = 1<<17,
    1541             :         IFF_L3MDEV_MASTER               = 1<<18,
    1542             :         IFF_NO_QUEUE                    = 1<<19,
    1543             :         IFF_OPENVSWITCH                 = 1<<20,
    1544             :         IFF_L3MDEV_SLAVE                = 1<<21,
    1545             :         IFF_TEAM                        = 1<<22,
    1546             :         IFF_RXFH_CONFIGURED             = 1<<23,
    1547             :         IFF_PHONY_HEADROOM              = 1<<24,
    1548             :         IFF_MACSEC                      = 1<<25,
    1549             :         IFF_NO_RX_HANDLER               = 1<<26,
    1550             :         IFF_FAILOVER                    = 1<<27,
    1551             :         IFF_FAILOVER_SLAVE              = 1<<28,
    1552             :         IFF_L3MDEV_RX_HANDLER           = 1<<29,
    1553             :         IFF_LIVE_RENAME_OK              = 1<<30,
    1554             : };
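                      : 
                      : /*
                      :  * Drivers set these bits internally, e.g. a hypothetical setup
                      :  * function advertising hardware unicast filtering and live address
                      :  * changes; readers should prefer helpers such as
                      :  * netif_is_bridge_port() over testing the bits directly.
                      :  */
                      : static void foo_setup(struct net_device *dev)
                      : {
                      :         dev->priv_flags |= IFF_UNICAST_FLT;
                      :         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
                      : }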
    1555             : 
    1556             : #define IFF_802_1Q_VLAN                 IFF_802_1Q_VLAN
    1557             : #define IFF_EBRIDGE                     IFF_EBRIDGE
    1558             : #define IFF_BONDING                     IFF_BONDING
    1559             : #define IFF_ISATAP                      IFF_ISATAP
    1560             : #define IFF_WAN_HDLC                    IFF_WAN_HDLC
    1561             : #define IFF_XMIT_DST_RELEASE            IFF_XMIT_DST_RELEASE
    1562             : #define IFF_DONT_BRIDGE                 IFF_DONT_BRIDGE
    1563             : #define IFF_DISABLE_NETPOLL             IFF_DISABLE_NETPOLL
    1564             : #define IFF_MACVLAN_PORT                IFF_MACVLAN_PORT
    1565             : #define IFF_BRIDGE_PORT                 IFF_BRIDGE_PORT
    1566             : #define IFF_OVS_DATAPATH                IFF_OVS_DATAPATH
    1567             : #define IFF_TX_SKB_SHARING              IFF_TX_SKB_SHARING
    1568             : #define IFF_UNICAST_FLT                 IFF_UNICAST_FLT
    1569             : #define IFF_TEAM_PORT                   IFF_TEAM_PORT
    1570             : #define IFF_SUPP_NOFCS                  IFF_SUPP_NOFCS
    1571             : #define IFF_LIVE_ADDR_CHANGE            IFF_LIVE_ADDR_CHANGE
    1572             : #define IFF_MACVLAN                     IFF_MACVLAN
    1573             : #define IFF_XMIT_DST_RELEASE_PERM       IFF_XMIT_DST_RELEASE_PERM
    1574             : #define IFF_L3MDEV_MASTER               IFF_L3MDEV_MASTER
    1575             : #define IFF_NO_QUEUE                    IFF_NO_QUEUE
    1576             : #define IFF_OPENVSWITCH                 IFF_OPENVSWITCH
    1577             : #define IFF_L3MDEV_SLAVE                IFF_L3MDEV_SLAVE
    1578             : #define IFF_TEAM                        IFF_TEAM
    1579             : #define IFF_RXFH_CONFIGURED             IFF_RXFH_CONFIGURED
    1580             : #define IFF_MACSEC                      IFF_MACSEC
    1581             : #define IFF_NO_RX_HANDLER               IFF_NO_RX_HANDLER
    1582             : #define IFF_FAILOVER                    IFF_FAILOVER
    1583             : #define IFF_FAILOVER_SLAVE              IFF_FAILOVER_SLAVE
    1584             : #define IFF_L3MDEV_RX_HANDLER           IFF_L3MDEV_RX_HANDLER
    1585             : #define IFF_LIVE_RENAME_OK              IFF_LIVE_RENAME_OK
    1586             : 
    1587             : /* Specifies the type of the struct net_device::ml_priv pointer */
    1588             : enum netdev_ml_priv_type {
    1589             :         ML_PRIV_NONE,
    1590             :         ML_PRIV_CAN,
    1591             : };
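                      : 
                      : /*
                      :  * A sketch of typed access to ml_priv via the
                      :  * netdev_get_ml_priv()/netdev_set_ml_priv() helpers defined later in
                      :  * this header; the getter returns NULL unless the pointer was stored
                      :  * with the matching type:
                      :  */
                      : static inline void *foo_get_can_ml_priv(struct net_device *dev)
                      : {
                      :         return netdev_get_ml_priv(dev, ML_PRIV_CAN);
                      : }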
    1592             : 
    1593             : /**
    1594             :  *      struct net_device - The DEVICE structure.
    1595             :  *
    1596             :  *      Actually, this whole structure is a big mistake.  It mixes I/O
    1597             :  *      data with strictly "high-level" data, and it has to know about
    1598             :  *      almost every data structure used in the INET module.
    1599             :  *
    1600             :  *      @name:  This is the first field of the "visible" part of this structure
    1601             :  *              (i.e. as seen by users in the "Space.c" file).  It is the name
    1602             :  *              of the interface.
    1603             :  *
    1604             :  *      @name_node:     Name hashlist node
    1605             :  *      @ifalias:       SNMP alias
    1606             :  *      @mem_end:       Shared memory end
    1607             :  *      @mem_start:     Shared memory start
    1608             :  *      @base_addr:     Device I/O address
    1609             :  *      @irq:           Device IRQ number
    1610             :  *
    1611             :  *      @state:         Generic network queuing layer state, see netdev_state_t
    1612             :  *      @dev_list:      The global list of network devices
    1613             :  *      @napi_list:     List entry used for polling NAPI devices
     1614             :  *      @unreg_list:    List entry used when we are unregistering the
     1615             :  *                      device; see the function unregister_netdev
    1616             :  *      @close_list:    List entry used when we are closing the device
    1617             :  *      @ptype_all:     Device-specific packet handlers for all protocols
    1618             :  *      @ptype_specific: Device-specific, protocol-specific packet handlers
    1619             :  *
    1620             :  *      @adj_list:      Directly linked devices, like slaves for bonding
    1621             :  *      @features:      Currently active device features
    1622             :  *      @hw_features:   User-changeable features
    1623             :  *
    1624             :  *      @wanted_features:       User-requested features
    1625             :  *      @vlan_features:         Mask of features inheritable by VLAN devices
    1626             :  *
    1627             :  *      @hw_enc_features:       Mask of features inherited by encapsulating devices
    1628             :  *                              This field indicates what encapsulation
    1629             :  *                              offloads the hardware is capable of doing,
    1630             :  *                              and drivers will need to set them appropriately.
    1631             :  *
    1632             :  *      @mpls_features: Mask of features inheritable by MPLS
    1633             :  *      @gso_partial_features: value(s) from NETIF_F_GSO\*
    1634             :  *
    1635             :  *      @ifindex:       interface index
    1636             :  *      @group:         The group the device belongs to
    1637             :  *
     1638             :  *      @stats:         Statistics struct, kept for legacy reasons; use
     1639             :  *                      rtnl_link_stats64 instead
    1640             :  *
    1641             :  *      @rx_dropped:    Dropped packets by core network,
    1642             :  *                      do not use this in drivers
    1643             :  *      @tx_dropped:    Dropped packets by core network,
    1644             :  *                      do not use this in drivers
    1645             :  *      @rx_nohandler:  nohandler dropped packets by core network on
    1646             :  *                      inactive devices, do not use this in drivers
    1647             :  *      @carrier_up_count:      Number of times the carrier has been up
    1648             :  *      @carrier_down_count:    Number of times the carrier has been down
    1649             :  *
    1650             :  *      @wireless_handlers:     List of functions to handle Wireless Extensions,
    1651             :  *                              instead of ioctl,
    1652             :  *                              see <net/iw_handler.h> for details.
    1653             :  *      @wireless_data: Instance data managed by the core of wireless extensions
    1654             :  *
    1655             :  *      @netdev_ops:    Includes several pointers to callbacks,
    1656             :  *                      if one wants to override the ndo_*() functions
    1657             :  *      @ethtool_ops:   Management operations
    1658             :  *      @l3mdev_ops:    Layer 3 master device operations
    1659             :  *      @ndisc_ops:     Includes callbacks for different IPv6 neighbour
    1660             :  *                      discovery handling. Necessary for e.g. 6LoWPAN.
    1661             :  *      @xfrmdev_ops:   Transformation offload operations
    1662             :  *      @tlsdev_ops:    Transport Layer Security offload operations
     1663             :  *      @header_ops:    Includes callbacks for creating, parsing, caching, etc.
     1664             :  *                      of Layer 2 headers.
    1665             :  *
    1666             :  *      @flags:         Interface flags (a la BSD)
    1667             :  *      @priv_flags:    Like 'flags' but invisible to userspace,
    1668             :  *                      see if.h for the definitions
     1669             :  *      @gflags:        Global flags (kept as legacy)
    1670             :  *      @padded:        How much padding added by alloc_netdev()
    1671             :  *      @operstate:     RFC2863 operstate
    1672             :  *      @link_mode:     Mapping policy to operstate
    1673             :  *      @if_port:       Selectable AUI, TP, ...
    1674             :  *      @dma:           DMA channel
    1675             :  *      @mtu:           Interface MTU value
    1676             :  *      @min_mtu:       Interface Minimum MTU value
    1677             :  *      @max_mtu:       Interface Maximum MTU value
    1678             :  *      @type:          Interface hardware type
    1679             :  *      @hard_header_len: Maximum hardware header length.
    1680             :  *      @min_header_len:  Minimum hardware header length
    1681             :  *
     1682             :  *      @needed_headroom: Extra headroom the hardware may need, though it
     1683             :  *                        cannot be guaranteed in all cases
     1684             :  *      @needed_tailroom: Extra tailroom the hardware may need, though it
     1685             :  *                        cannot be guaranteed in all cases. Some cases also
     1686             :  *                        use LL_MAX_HEADER instead to allocate the skb
    1687             :  *
    1688             :  *      interface address info:
    1689             :  *
    1690             :  *      @perm_addr:             Permanent hw address
    1691             :  *      @addr_assign_type:      Hw address assignment type
    1692             :  *      @addr_len:              Hardware address length
    1693             :  *      @upper_level:           Maximum depth level of upper devices.
    1694             :  *      @lower_level:           Maximum depth level of lower devices.
    1695             :  *      @neigh_priv_len:        Used in neigh_alloc()
    1696             :  *      @dev_id:                Used to differentiate devices that share
    1697             :  *                              the same link layer address
    1698             :  *      @dev_port:              Used to differentiate devices that share
    1699             :  *                              the same function
    1700             :  *      @addr_list_lock:        XXX: need comments on this one
    1701             :  *      @name_assign_type:      network interface name assignment type
    1702             :  *      @uc_promisc:            Counter that indicates promiscuous mode
    1703             :  *                              has been enabled due to the need to listen to
    1704             :  *                              additional unicast addresses in a device that
    1705             :  *                              does not implement ndo_set_rx_mode()
    1706             :  *      @uc:                    unicast mac addresses
    1707             :  *      @mc:                    multicast mac addresses
    1708             :  *      @dev_addrs:             list of device hw addresses
    1709             :  *      @queues_kset:           Group of all Kobjects in the Tx and RX queues
    1710             :  *      @promiscuity:           Number of times the NIC is told to work in
    1711             :  *                              promiscuous mode; if it becomes 0 the NIC will
    1712             :  *                              exit promiscuous mode
    1713             :  *      @allmulti:              Counter, enables or disables allmulticast mode
    1714             :  *
    1715             :  *      @vlan_info:     VLAN info
    1716             :  *      @dsa_ptr:       dsa specific data
    1717             :  *      @tipc_ptr:      TIPC specific data
    1718             :  *      @atalk_ptr:     AppleTalk link
    1719             :  *      @ip_ptr:        IPv4 specific data
    1720             :  *      @dn_ptr:        DECnet specific data
    1721             :  *      @ip6_ptr:       IPv6 specific data
    1722             :  *      @ax25_ptr:      AX.25 specific data
    1723             :  *      @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
    1724             :  *      @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
    1725             :  *                       device struct
    1726             :  *      @mpls_ptr:      mpls_dev struct pointer
    1727             :  *
    1728             :  *      @dev_addr:      Hw address (before bcast,
    1729             :  *                      because most packets are unicast)
    1730             :  *
    1731             :  *      @_rx:                   Array of RX queues
    1732             :  *      @num_rx_queues:         Number of RX queues
    1733             :  *                              allocated at register_netdev() time
    1734             :  *      @real_num_rx_queues:    Number of RX queues currently active in device
    1735             :  *      @xdp_prog:              attached XDP BPF program pointer
    1736             :  *      @gro_flush_timeout:     timeout for GRO layer in NAPI
    1737             :  *      @napi_defer_hard_irqs:  If not zero, provides a counter that allows
    1738             :  *                              avoiding NIC hard IRQs on busy queues.
    1739             :  *
    1740             :  *      @rx_handler:            handler for received packets
    1741             :  *      @rx_handler_data:       XXX: need comments on this one
    1742             :  *      @miniq_ingress:         ingress/clsact qdisc specific data for
    1743             :  *                              ingress processing
    1744             :  *      @ingress_queue:         XXX: need comments on this one
    1745             :  *      @nf_hooks_ingress:      netfilter hooks executed for ingress packets
    1746             :  *      @broadcast:             hw bcast address
    1747             :  *
    1748             :  *      @rx_cpu_rmap:   CPU reverse-mapping for RX completion interrupts,
    1749             :  *                      indexed by RX queue number. Assigned by driver.
    1750             :  *                      This must only be set if the ndo_rx_flow_steer
    1751             :  *                      operation is defined
    1752             :  *      @index_hlist:           Device index hash chain
    1753             :  *
    1754             :  *      @_tx:                   Array of TX queues
    1755             :  *      @num_tx_queues:         Number of TX queues allocated at alloc_netdev_mq() time
    1756             :  *      @real_num_tx_queues:    Number of TX queues currently active in device
    1757             :  *      @qdisc:                 Root qdisc from userspace point of view
    1758             :  *      @tx_queue_len:          Max frames per queue allowed
    1759             :  *      @tx_global_lock:        XXX: need comments on this one
    1760             :  *      @xdp_bulkq:             XDP device bulk queue
    1761             :  *      @xps_cpus_map:          all CPUs map for XPS device
    1762             :  *      @xps_rxqs_map:          all RXQs map for XPS device
    1763             :  *
    1764             :  *      @xps_maps:      XXX: need comments on this one
    1765             :  *      @miniq_egress:          clsact qdisc specific data for
    1766             :  *                              egress processing
    1767             :  *      @qdisc_hash:            qdisc hash table
    1768             :  *      @watchdog_timeo:        Represents the timeout that is used by
    1769             :  *                              the watchdog (see dev_watchdog())
    1770             :  *      @watchdog_timer:        Transmit watchdog timer
    1771             :  *
    1772             :  *      @proto_down_reason:     reason a netdev interface is held down
    1773             :  *      @pcpu_refcnt:           Number of references to this device
    1774             :  *      @todo_list:             Delayed register/unregister
    1775             :  *      @link_watch_list:       XXX: need comments on this one
    1776             :  *
    1777             :  *      @reg_state:             Register/unregister state machine
    1778             :  *      @dismantle:             Device is going to be freed
    1779             :  *      @rtnl_link_state:       This enum represents the phases of creating
    1780             :  *                              a new link
    1781             :  *
    1782             :  *      @needs_free_netdev:     Should unregister perform free_netdev?
    1783             :  *      @priv_destructor:       Called from unregister
    1784             :  *      @npinfo:                XXX: need comments on this one
    1785             :  *      @nd_net:                Network namespace this network device is inside
    1786             :  *
    1787             :  *      @ml_priv:       Mid-layer private
    1788             :  *      @ml_priv_type:  Mid-layer private type
    1789             :  *      @lstats:        Loopback statistics
    1790             :  *      @tstats:        Tunnel statistics
    1791             :  *      @dstats:        Dummy statistics
    1792             :  *      @vstats:        Virtual ethernet statistics
    1793             :  *
    1794             :  *      @garp_port:     GARP
    1795             :  *      @mrp_port:      MRP
    1796             :  *
    1797             :  *      @dev:           Class/net/name entry
    1798             :  *      @sysfs_groups:  Space for optional device, statistics and wireless
    1799             :  *                      sysfs groups
    1800             :  *
    1801             :  *      @sysfs_rx_queue_group:  Space for optional per-rx queue attributes
    1802             :  *      @rtnl_link_ops: Rtnl_link_ops
    1803             :  *
    1804             :  *      @gso_max_size:  Maximum size of generic segmentation offload
    1805             :  *      @gso_max_segs:  Maximum number of segments that can be passed to the
    1806             :  *                      NIC for GSO
    1807             :  *
    1808             :  *      @dcbnl_ops:     Data Center Bridging netlink ops
    1809             :  *      @num_tc:        Number of traffic classes in the net device
    1810             :  *      @tc_to_txq:     XXX: need comments on this one
    1811             :  *      @prio_tc_map:   XXX: need comments on this one
    1812             :  *
    1813             :  *      @fcoe_ddp_xid:  Max exchange id for FCoE LRO by ddp
    1814             :  *
    1815             :  *      @priomap:       XXX: need comments on this one
    1816             :  *      @phydev:        Physical device may attach itself
    1817             :  *                      for hardware timestamping
    1818             :  *      @sfp_bus:       attached &struct sfp_bus structure.
    1819             :  *
    1820             :  *      @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
    1821             :  *      @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
    1822             :  *
    1823             :  *      @proto_down:    protocol port state information can be sent to the
    1824             :  *                      switch driver and used to set the phys state of the
    1825             :  *                      switch port.
    1826             :  *
    1827             :  *      @wol_enabled:   Wake-on-LAN is enabled
    1828             :  *
    1829             :  *      @threaded:      napi threaded mode is enabled
    1830             :  *
    1831             :  *      @net_notifier_list:     List of per-net netdev notifier block
    1832             :  *                              that follow this device when it is moved
    1833             :  *                              to another network namespace.
    1834             :  *
    1835             :  *      @macsec_ops:    MACsec offloading ops
    1836             :  *
    1837             :  *      @udp_tunnel_nic_info:   static structure describing the UDP tunnel
    1838             :  *                              offload capabilities of the device
    1839             :  *      @udp_tunnel_nic:        UDP tunnel offload state
    1840             :  *      @xdp_state:             stores info on attached XDP BPF programs
    1841             :  *
    1842             :  *      @nested_level:  Used as a parameter of spin_lock_nested() of
    1843             :  *                      dev->addr_list_lock.
    1844             :  *      @unlink_list:   As netif_addr_lock() can be called recursively,
    1845             :  *                      keep a list of interfaces to be deleted.
    1846             :  *
    1847             :  *      FIXME: cleanup struct net_device such that network protocol info
    1848             :  *      moves out.
    1849             :  */
    1850             : 
    1851             : struct net_device {
    1852             :         char                    name[IFNAMSIZ];
    1853             :         struct netdev_name_node *name_node;
    1854             :         struct dev_ifalias      __rcu *ifalias;
    1855             :         /*
    1856             :          *      I/O specific fields
    1857             :          *      FIXME: Merge these and struct ifmap into one
    1858             :          */
    1859             :         unsigned long           mem_end;
    1860             :         unsigned long           mem_start;
    1861             :         unsigned long           base_addr;
    1862             : 
    1863             :         /*
    1864             :          *      Some hardware also needs these fields (state, dev_list,
    1865             :          *      napi_list, unreg_list, close_list) but they are not
    1866             :          *      part of the usual set specified in Space.c.
    1867             :          */
    1868             : 
    1869             :         unsigned long           state;
    1870             : 
    1871             :         struct list_head        dev_list;
    1872             :         struct list_head        napi_list;
    1873             :         struct list_head        unreg_list;
    1874             :         struct list_head        close_list;
    1875             :         struct list_head        ptype_all;
    1876             :         struct list_head        ptype_specific;
    1877             : 
    1878             :         struct {
    1879             :                 struct list_head upper;
    1880             :                 struct list_head lower;
    1881             :         } adj_list;
    1882             : 
    1883             :         /* Read-mostly cache-line for fast-path access */
    1884             :         unsigned int            flags;
    1885             :         unsigned int            priv_flags;
    1886             :         const struct net_device_ops *netdev_ops;
    1887             :         int                     ifindex;
    1888             :         unsigned short          gflags;
    1889             :         unsigned short          hard_header_len;
    1890             : 
    1891             :         /* Note : dev->mtu is often read without holding a lock.
    1892             :          * Writers usually hold RTNL.
    1893             :          * It is recommended to use READ_ONCE() to annotate the reads,
    1894             :          * and to use WRITE_ONCE() to annotate the writes.
    1895             :          */
    1896             :         unsigned int            mtu;
    1897             :         unsigned short          needed_headroom;
    1898             :         unsigned short          needed_tailroom;
    1899             : 
    1900             :         netdev_features_t       features;
    1901             :         netdev_features_t       hw_features;
    1902             :         netdev_features_t       wanted_features;
    1903             :         netdev_features_t       vlan_features;
    1904             :         netdev_features_t       hw_enc_features;
    1905             :         netdev_features_t       mpls_features;
    1906             :         netdev_features_t       gso_partial_features;
    1907             : 
    1908             :         unsigned int            min_mtu;
    1909             :         unsigned int            max_mtu;
    1910             :         unsigned short          type;
    1911             :         unsigned char           min_header_len;
    1912             :         unsigned char           name_assign_type;
    1913             : 
    1914             :         int                     group;
    1915             : 
    1916             :         struct net_device_stats stats; /* not used by modern drivers */
    1917             : 
    1918             :         atomic_long_t           rx_dropped;
    1919             :         atomic_long_t           tx_dropped;
    1920             :         atomic_long_t           rx_nohandler;
    1921             : 
    1922             :         /* Stats to monitor link on/off, flapping */
    1923             :         atomic_t                carrier_up_count;
    1924             :         atomic_t                carrier_down_count;
    1925             : 
    1926             : #ifdef CONFIG_WIRELESS_EXT
    1927             :         const struct iw_handler_def *wireless_handlers;
    1928             :         struct iw_public_data   *wireless_data;
    1929             : #endif
    1930             :         const struct ethtool_ops *ethtool_ops;
    1931             : #ifdef CONFIG_NET_L3_MASTER_DEV
    1932             :         const struct l3mdev_ops *l3mdev_ops;
    1933             : #endif
    1934             : #if IS_ENABLED(CONFIG_IPV6)
    1935             :         const struct ndisc_ops *ndisc_ops;
    1936             : #endif
    1937             : 
    1938             : #ifdef CONFIG_XFRM_OFFLOAD
    1939             :         const struct xfrmdev_ops *xfrmdev_ops;
    1940             : #endif
    1941             : 
    1942             : #if IS_ENABLED(CONFIG_TLS_DEVICE)
    1943             :         const struct tlsdev_ops *tlsdev_ops;
    1944             : #endif
    1945             : 
    1946             :         const struct header_ops *header_ops;
    1947             : 
    1948             :         unsigned char           operstate;
    1949             :         unsigned char           link_mode;
    1950             : 
    1951             :         unsigned char           if_port;
    1952             :         unsigned char           dma;
    1953             : 
    1954             :         /* Interface address info. */
    1955             :         unsigned char           perm_addr[MAX_ADDR_LEN];
    1956             :         unsigned char           addr_assign_type;
    1957             :         unsigned char           addr_len;
    1958             :         unsigned char           upper_level;
    1959             :         unsigned char           lower_level;
    1960             : 
    1961             :         unsigned short          neigh_priv_len;
    1962             :         unsigned short          dev_id;
    1963             :         unsigned short          dev_port;
    1964             :         unsigned short          padded;
    1965             : 
    1966             :         spinlock_t              addr_list_lock;
    1967             :         int                     irq;
    1968             : 
    1969             :         struct netdev_hw_addr_list      uc;
    1970             :         struct netdev_hw_addr_list      mc;
    1971             :         struct netdev_hw_addr_list      dev_addrs;
    1972             : 
    1973             : #ifdef CONFIG_SYSFS
    1974             :         struct kset             *queues_kset;
    1975             : #endif
    1976             : #ifdef CONFIG_LOCKDEP
    1977             :         struct list_head        unlink_list;
    1978             : #endif
    1979             :         unsigned int            promiscuity;
    1980             :         unsigned int            allmulti;
    1981             :         bool                    uc_promisc;
    1982             : #ifdef CONFIG_LOCKDEP
    1983             :         unsigned char           nested_level;
    1984             : #endif
    1985             : 
    1986             : 
    1987             :         /* Protocol-specific pointers */
    1988             : 
    1989             : #if IS_ENABLED(CONFIG_VLAN_8021Q)
    1990             :         struct vlan_info __rcu  *vlan_info;
    1991             : #endif
    1992             : #if IS_ENABLED(CONFIG_NET_DSA)
    1993             :         struct dsa_port         *dsa_ptr;
    1994             : #endif
    1995             : #if IS_ENABLED(CONFIG_TIPC)
    1996             :         struct tipc_bearer __rcu *tipc_ptr;
    1997             : #endif
    1998             : #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
    1999             :         void                    *atalk_ptr;
    2000             : #endif
    2001             :         struct in_device __rcu  *ip_ptr;
    2002             : #if IS_ENABLED(CONFIG_DECNET)
    2003             :         struct dn_dev __rcu     *dn_ptr;
    2004             : #endif
    2005             :         struct inet6_dev __rcu  *ip6_ptr;
    2006             : #if IS_ENABLED(CONFIG_AX25)
    2007             :         void                    *ax25_ptr;
    2008             : #endif
    2009             :         struct wireless_dev     *ieee80211_ptr;
    2010             :         struct wpan_dev         *ieee802154_ptr;
    2011             : #if IS_ENABLED(CONFIG_MPLS_ROUTING)
    2012             :         struct mpls_dev __rcu   *mpls_ptr;
    2013             : #endif
    2014             : 
    2015             : /*
    2016             :  * Cache lines mostly used on receive path (including eth_type_trans())
    2017             :  */
    2018             :         /* Interface address info used in eth_type_trans() */
    2019             :         unsigned char           *dev_addr;
    2020             : 
    2021             :         struct netdev_rx_queue  *_rx;
    2022             :         unsigned int            num_rx_queues;
    2023             :         unsigned int            real_num_rx_queues;
    2024             : 
    2025             :         struct bpf_prog __rcu   *xdp_prog;
    2026             :         unsigned long           gro_flush_timeout;
    2027             :         int                     napi_defer_hard_irqs;
    2028             :         rx_handler_func_t __rcu *rx_handler;
    2029             :         void __rcu              *rx_handler_data;
    2030             : 
    2031             : #ifdef CONFIG_NET_CLS_ACT
    2032             :         struct mini_Qdisc __rcu *miniq_ingress;
    2033             : #endif
    2034             :         struct netdev_queue __rcu *ingress_queue;
    2035             : #ifdef CONFIG_NETFILTER_INGRESS
    2036             :         struct nf_hook_entries __rcu *nf_hooks_ingress;
    2037             : #endif
    2038             : 
    2039             :         unsigned char           broadcast[MAX_ADDR_LEN];
    2040             : #ifdef CONFIG_RFS_ACCEL
    2041             :         struct cpu_rmap         *rx_cpu_rmap;
    2042             : #endif
    2043             :         struct hlist_node       index_hlist;
    2044             : 
    2045             : /*
    2046             :  * Cache lines mostly used on transmit path
    2047             :  */
    2048             :         struct netdev_queue     *_tx ____cacheline_aligned_in_smp;
    2049             :         unsigned int            num_tx_queues;
    2050             :         unsigned int            real_num_tx_queues;
    2051             :         struct Qdisc            *qdisc;
    2052             :         unsigned int            tx_queue_len;
    2053             :         spinlock_t              tx_global_lock;
    2054             : 
    2055             :         struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
    2056             : 
    2057             : #ifdef CONFIG_XPS
    2058             :         struct xps_dev_maps __rcu *xps_cpus_map;
    2059             :         struct xps_dev_maps __rcu *xps_rxqs_map;
    2060             : #endif
    2061             : #ifdef CONFIG_NET_CLS_ACT
    2062             :         struct mini_Qdisc __rcu *miniq_egress;
    2063             : #endif
    2064             : 
    2065             : #ifdef CONFIG_NET_SCHED
    2066             :         DECLARE_HASHTABLE       (qdisc_hash, 4);
    2067             : #endif
    2068             :         /* These may be needed for future network-power-down code. */
    2069             :         struct timer_list       watchdog_timer;
    2070             :         int                     watchdog_timeo;
    2071             : 
    2072             :         u32                     proto_down_reason;
    2073             : 
    2074             :         struct list_head        todo_list;
    2075             :         int __percpu            *pcpu_refcnt;
    2076             : 
    2077             :         struct list_head        link_watch_list;
    2078             : 
    2079             :         enum { NETREG_UNINITIALIZED=0,
    2080             :                NETREG_REGISTERED,       /* completed register_netdevice */
    2081             :                NETREG_UNREGISTERING,    /* called unregister_netdevice */
    2082             :                NETREG_UNREGISTERED,     /* completed unregister todo */
    2083             :                NETREG_RELEASED,         /* called free_netdev */
    2084             :                NETREG_DUMMY,            /* dummy device for NAPI poll */
    2085             :         } reg_state:8;
    2086             : 
    2087             :         bool dismantle;
    2088             : 
    2089             :         enum {
    2090             :                 RTNL_LINK_INITIALIZED,
    2091             :                 RTNL_LINK_INITIALIZING,
    2092             :         } rtnl_link_state:16;
    2093             : 
    2094             :         bool needs_free_netdev;
    2095             :         void (*priv_destructor)(struct net_device *dev);
    2096             : 
    2097             : #ifdef CONFIG_NETPOLL
    2098             :         struct netpoll_info __rcu       *npinfo;
    2099             : #endif
    2100             : 
    2101             :         possible_net_t                  nd_net;
    2102             : 
    2103             :         /* mid-layer private */
    2104             :         void                            *ml_priv;
    2105             :         enum netdev_ml_priv_type        ml_priv_type;
    2106             : 
    2107             :         union {
    2108             :                 struct pcpu_lstats __percpu             *lstats;
    2109             :                 struct pcpu_sw_netstats __percpu        *tstats;
    2110             :                 struct pcpu_dstats __percpu             *dstats;
    2111             :         };
    2112             : 
    2113             : #if IS_ENABLED(CONFIG_GARP)
    2114             :         struct garp_port __rcu  *garp_port;
    2115             : #endif
    2116             : #if IS_ENABLED(CONFIG_MRP)
    2117             :         struct mrp_port __rcu   *mrp_port;
    2118             : #endif
    2119             : 
    2120             :         struct device           dev;
    2121             :         const struct attribute_group *sysfs_groups[4];
    2122             :         const struct attribute_group *sysfs_rx_queue_group;
    2123             : 
    2124             :         const struct rtnl_link_ops *rtnl_link_ops;
    2125             : 
    2126             :         /* for setting kernel sock attribute on TCP connection setup */
    2127             : #define GSO_MAX_SIZE            65536
    2128             :         unsigned int            gso_max_size;
    2129             : #define GSO_MAX_SEGS            65535
    2130             :         u16                     gso_max_segs;
    2131             : 
    2132             : #ifdef CONFIG_DCB
    2133             :         const struct dcbnl_rtnl_ops *dcbnl_ops;
    2134             : #endif
    2135             :         s16                     num_tc;
    2136             :         struct netdev_tc_txq    tc_to_txq[TC_MAX_QUEUE];
    2137             :         u8                      prio_tc_map[TC_BITMASK + 1];
    2138             : 
    2139             : #if IS_ENABLED(CONFIG_FCOE)
    2140             :         unsigned int            fcoe_ddp_xid;
    2141             : #endif
    2142             : #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
    2143             :         struct netprio_map __rcu *priomap;
    2144             : #endif
    2145             :         struct phy_device       *phydev;
    2146             :         struct sfp_bus          *sfp_bus;
    2147             :         struct lock_class_key   *qdisc_tx_busylock;
    2148             :         struct lock_class_key   *qdisc_running_key;
    2149             :         bool                    proto_down;
    2150             :         unsigned                wol_enabled:1;
    2151             :         unsigned                threaded:1;
    2152             : 
    2153             :         struct list_head        net_notifier_list;
    2154             : 
    2155             : #if IS_ENABLED(CONFIG_MACSEC)
    2156             :         /* MACsec management functions */
    2157             :         const struct macsec_ops *macsec_ops;
    2158             : #endif
    2159             :         const struct udp_tunnel_nic_info        *udp_tunnel_nic_info;
    2160             :         struct udp_tunnel_nic   *udp_tunnel_nic;
    2161             : 
    2162             :         /* protected by rtnl_lock */
    2163             :         struct bpf_xdp_entity   xdp_state[__MAX_XDP_MODE];
    2164             : };
    2165             : #define to_net_dev(d) container_of(d, struct net_device, dev)
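
/* A minimal sketch of the lockless dev->mtu access pattern noted in the
 * struct comment above (function names hypothetical; ASSERT_RTNL() comes
 * from <linux/rtnetlink.h>): readers annotate with READ_ONCE(), writers
 * hold RTNL and annotate with WRITE_ONCE().
 */
static inline unsigned int example_read_mtu(const struct net_device *dev)
{
        return READ_ONCE(dev->mtu);     /* safe without holding RTNL */
}

static inline void example_write_mtu(struct net_device *dev, unsigned int mtu)
{
        ASSERT_RTNL();                  /* writers usually hold RTNL */
        WRITE_ONCE(dev->mtu, mtu);
}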
    2166             : 
    2167         723 : static inline bool netif_elide_gro(const struct net_device *dev)
    2168             : {
    2169         723 :         if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
    2170           0 :                 return true;
    2171             :         return false;
    2172             : }
    2173             : 
    2174             : #define NETDEV_ALIGN            32
    2175             : 
    2176             : static inline
    2177           0 : int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
    2178             : {
    2179           0 :         return dev->prio_tc_map[prio & TC_BITMASK];
    2180             : }
    2181             : 
    2182             : static inline
    2183           0 : int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
    2184             : {
    2185           0 :         if (tc >= dev->num_tc)
    2186             :                 return -EINVAL;
    2187             : 
    2188           0 :         dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
    2189           0 :         return 0;
    2190             : }
    2191             : 
    2192             : int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
    2193             : void netdev_reset_tc(struct net_device *dev);
    2194             : int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
    2195             : int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
    2196             : 
    2197             : static inline
    2198             : int netdev_get_num_tc(struct net_device *dev)
    2199             : {
    2200             :         return dev->num_tc;
    2201             : }
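
/* A hedged sketch of the traffic-class helpers above (function name and
 * queue layout hypothetical): split 8 TX queues into two classes of 4
 * queues each, then map priority 0 to TC 0 and priority 1 to TC 1.
 */
static inline int example_setup_tc(struct net_device *dev)
{
        int err = netdev_set_num_tc(dev, 2);

        if (err)
                return err;

        netdev_set_tc_queue(dev, 0, 4, 0);      /* TC 0: queues 0-3 */
        netdev_set_tc_queue(dev, 1, 4, 4);      /* TC 1: queues 4-7 */

        netdev_set_prio_tc_map(dev, 0, 0);
        netdev_set_prio_tc_map(dev, 1, 1);
        return 0;
}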
    2202             : 
    2203             : static inline void net_prefetch(void *p)
    2204             : {
    2205             :         prefetch(p);
    2206             : #if L1_CACHE_BYTES < 128
    2207             :         prefetch((u8 *)p + L1_CACHE_BYTES);
    2208             : #endif
    2209             : }
    2210             : 
    2211             : static inline void net_prefetchw(void *p)
    2212             : {
    2213             :         prefetchw(p);
    2214             : #if L1_CACHE_BYTES < 128
    2215             :         prefetchw((u8 *)p + L1_CACHE_BYTES);
    2216             : #endif
    2217             : }
    2218             : 
    2219             : void netdev_unbind_sb_channel(struct net_device *dev,
    2220             :                               struct net_device *sb_dev);
    2221             : int netdev_bind_sb_channel_queue(struct net_device *dev,
    2222             :                                  struct net_device *sb_dev,
    2223             :                                  u8 tc, u16 count, u16 offset);
    2224             : int netdev_set_sb_channel(struct net_device *dev, u16 channel);
    2225             : static inline int netdev_get_sb_channel(struct net_device *dev)
    2226             : {
    2227             :         return max_t(int, -dev->num_tc, 0);
    2228             : }
    2229             : 
    2230             : static inline
    2231        2207 : struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
    2232             :                                          unsigned int index)
    2233             : {
    2234        1755 :         return &dev->_tx[index];
    2235             : }
    2236             : 
    2237         448 : static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
    2238             :                                                     const struct sk_buff *skb)
    2239             : {
    2240         448 :         return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
    2241             : }
    2242             : 
    2243          12 : static inline void netdev_for_each_tx_queue(struct net_device *dev,
    2244             :                                             void (*f)(struct net_device *,
    2245             :                                                       struct netdev_queue *,
    2246             :                                                       void *),
    2247             :                                             void *arg)
    2248             : {
    2249          12 :         unsigned int i;
    2250             : 
    2251          24 :         for (i = 0; i < dev->num_tx_queues; i++)
    2252          12 :                 f(dev, &dev->_tx[i], arg);
    2253          12 : }
    2254             : 
    2255             : #define netdev_lockdep_set_classes(dev)                         \
    2256             : {                                                               \
    2257             :         static struct lock_class_key qdisc_tx_busylock_key;     \
    2258             :         static struct lock_class_key qdisc_running_key;         \
    2259             :         static struct lock_class_key qdisc_xmit_lock_key;       \
    2260             :         static struct lock_class_key dev_addr_list_lock_key;    \
    2261             :         unsigned int i;                                         \
    2262             :                                                                 \
    2263             :         (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;       \
    2264             :         (dev)->qdisc_running_key = &qdisc_running_key;           \
    2265             :         lockdep_set_class(&(dev)->addr_list_lock,                \
    2266             :                           &dev_addr_list_lock_key);         \
    2267             :         for (i = 0; i < (dev)->num_tx_queues; i++)                \
    2268             :                 lockdep_set_class(&(dev)->_tx[i]._xmit_lock,     \
    2269             :                                   &qdisc_xmit_lock_key);    \
    2270             : }
    2271             : 
    2272             : u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
    2273             :                      struct net_device *sb_dev);
    2274             : struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
    2275             :                                          struct sk_buff *skb,
    2276             :                                          struct net_device *sb_dev);
    2277             : 
    2278             : /* returns the headroom that the master device needs to take into account
    2279             :  * when forwarding to this dev
    2280             :  */
    2281             : static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
    2282             : {
    2283             :         return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
    2284             : }
    2285             : 
    2286             : static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
    2287             : {
    2288             :         if (dev->netdev_ops->ndo_set_rx_headroom)
    2289             :                 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
    2290             : }
    2291             : 
    2292             : /* set the device rx headroom to the dev's default */
    2293             : static inline void netdev_reset_rx_headroom(struct net_device *dev)
    2294             : {
    2295             :         netdev_set_rx_headroom(dev, -1);
    2296             : }
    2297             : 
    2298             : static inline void *netdev_get_ml_priv(struct net_device *dev,
    2299             :                                        enum netdev_ml_priv_type type)
    2300             : {
    2301             :         if (dev->ml_priv_type != type)
    2302             :                 return NULL;
    2303             : 
    2304             :         return dev->ml_priv;
    2305             : }
    2306             : 
    2307             : static inline void netdev_set_ml_priv(struct net_device *dev,
    2308             :                                       void *ml_priv,
    2309             :                                       enum netdev_ml_priv_type type)
    2310             : {
    2311             :         WARN(dev->ml_priv_type && dev->ml_priv_type != type,
    2312             :              "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
    2313             :              dev->ml_priv_type, type);
    2314             :         WARN(!dev->ml_priv_type && dev->ml_priv,
    2315             :              "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
    2316             : 
    2317             :         dev->ml_priv = ml_priv;
    2318             :         dev->ml_priv_type = type;
    2319             : }
    2320             : 
    2321             : /*
    2322             :  * Net namespace inlines
    2323             :  */
    2324             : static inline
    2325        5479 : struct net *dev_net(const struct net_device *dev)
    2326             : {
    2327        5450 :         return read_pnet(&dev->nd_net);
    2328             : }
    2329             : 
    2330             : static inline
    2331           5 : void dev_net_set(struct net_device *dev, struct net *net)
    2332             : {
    2333           5 :         write_pnet(&dev->nd_net, net);
    2334             : }
    2335             : 
    2336             : /**
    2337             :  *      netdev_priv - access network device private data
    2338             :  *      @dev: network device
    2339             :  *
    2340             :  * Get network device private data
    2341             :  */
    2342         474 : static inline void *netdev_priv(const struct net_device *dev)
    2343             : {
    2344         474 :         return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
    2345             : }
    2346             : 
    2347             : /* Set the sysfs physical device reference for the network logical device.
    2348             :  * If set prior to registration, a symlink is created during initialization.
    2349             :  */
    2350             : #define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))
    2351             : 
    2352             : /* Set the sysfs device type for the network logical device to allow
    2353             :  * fine-grained identification of different network device types. For
    2354             :  * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
    2355             :  */
    2356             : #define SET_NETDEV_DEVTYPE(net, devtype)        ((net)->dev.type = (devtype))
    2357             : 
    2358             : /* Default NAPI poll() weight
    2359             :  * Device drivers are strongly advised not to use a bigger value
    2360             :  */
    2361             : #define NAPI_POLL_WEIGHT 64
    2362             : 
    2363             : /**
    2364             :  *      netif_napi_add - initialize a NAPI context
    2365             :  *      @dev:  network device
    2366             :  *      @napi: NAPI context
    2367             :  *      @poll: polling function
    2368             :  *      @weight: default weight
    2369             :  *
    2370             :  * netif_napi_add() must be used to initialize a NAPI context prior to calling
    2371             :  * *any* of the other NAPI-related functions.
    2372             :  */
    2373             : void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
    2374             :                     int (*poll)(struct napi_struct *, int), int weight);
    2375             : 
    2376             : /**
    2377             :  *      netif_tx_napi_add - initialize a NAPI context
    2378             :  *      @dev:  network device
    2379             :  *      @napi: NAPI context
    2380             :  *      @poll: polling function
    2381             :  *      @weight: default weight
    2382             :  *
    2383             :  * This variant of netif_napi_add() should be used from drivers using NAPI
    2384             :  * to exclusively poll a TX queue.
    2385             :  * This avoids adding it to napi_hash[] and thus polluting that hash table.
    2386             :  */
    2387           1 : static inline void netif_tx_napi_add(struct net_device *dev,
    2388             :                                      struct napi_struct *napi,
    2389             :                                      int (*poll)(struct napi_struct *, int),
    2390             :                                      int weight)
    2391             : {
    2392           1 :         set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
    2393           1 :         netif_napi_add(dev, napi, poll, weight);
    2394           1 : }
    2395             : 
    2396             : /**
    2397             :  *  __netif_napi_del - remove a NAPI context
    2398             :  *  @napi: NAPI context
    2399             :  *
    2400             :  * Warning: caller must observe RCU grace period before freeing memory
    2401             :  * containing @napi. Drivers might want to call this helper to combine
    2402             :  * all the needed RCU grace periods into a single one.
    2403             :  */
    2404             : void __netif_napi_del(struct napi_struct *napi);
    2405             : 
    2406             : /**
    2407             :  *  netif_napi_del - remove a NAPI context
    2408             :  *  @napi: NAPI context
    2409             :  *
    2410             :  *  netif_napi_del() removes a NAPI context from the network device NAPI list
    2411             :  */
    2412           0 : static inline void netif_napi_del(struct napi_struct *napi)
    2413             : {
    2414           0 :         __netif_napi_del(napi);
    2415           0 :         synchronize_net();
    2416             : }
    2417             : 
    2418             : struct napi_gro_cb {
    2419             :         /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
    2420             :         void    *frag0;
    2421             : 
    2422             :         /* Length of frag0. */
    2423             :         unsigned int frag0_len;
    2424             : 
    2425             :         /* This indicates where we are processing relative to skb->data. */
    2426             :         int     data_offset;
    2427             : 
    2428             :         /* This is non-zero if the packet cannot be merged with the new skb. */
    2429             :         u16     flush;
    2430             : 
    2431             :         /* Save the IP ID here and check when we get to the transport layer */
    2432             :         u16     flush_id;
    2433             : 
    2434             :         /* Number of segments aggregated. */
    2435             :         u16     count;
    2436             : 
    2437             :         /* Start offset for remote checksum offload */
    2438             :         u16     gro_remcsum_start;
    2439             : 
    2440             :         /* jiffies when first packet was created/queued */
    2441             :         unsigned long age;
    2442             : 
    2443             :         /* Used in ipv6_gro_receive() and foo-over-udp */
    2444             :         u16     proto;
    2445             : 
    2446             :         /* This is non-zero if the packet may be of the same flow. */
    2447             :         u8      same_flow:1;
    2448             : 
    2449             :         /* Used in tunnel GRO receive */
    2450             :         u8      encap_mark:1;
    2451             : 
    2452             :         /* GRO checksum is valid */
    2453             :         u8      csum_valid:1;
    2454             : 
    2455             :         /* Number of checksums via CHECKSUM_UNNECESSARY */
    2456             :         u8      csum_cnt:3;
    2457             : 
    2458             :         /* Free the skb? */
    2459             :         u8      free:2;
    2460             : #define NAPI_GRO_FREE             1
    2461             : #define NAPI_GRO_FREE_STOLEN_HEAD 2
    2462             : 
    2463             :         /* Used in foo-over-udp, set in udp[46]_gro_receive */
    2464             :         u8      is_ipv6:1;
    2465             : 
    2466             :         /* Used in GRE, set in fou/gue_gro_receive */
    2467             :         u8      is_fou:1;
    2468             : 
    2469             :         /* Used to determine if flush_id can be ignored */
    2470             :         u8      is_atomic:1;
    2471             : 
    2472             :         /* Number of gro_receive callbacks this packet already went through */
    2473             :         u8 recursion_counter:4;
    2474             : 
    2475             :         /* GRO is done by frag_list pointer chaining. */
    2476             :         u8      is_flist:1;
    2477             : 
    2478             :         /* used to support CHECKSUM_COMPLETE for tunneling protocols */
    2479             :         __wsum  csum;
    2480             : 
    2481             :         /* used in skb_gro_receive() slow path */
    2482             :         struct sk_buff *last;
    2483             : };
    2484             : 
    2485             : #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
    2486             : 
    2487             : #define GRO_RECURSION_LIMIT 15
    2488         707 : static inline int gro_recursion_inc_test(struct sk_buff *skb)
    2489             : {
    2490         707 :         return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
    2491             : }
    2492             : 
    2493             : typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
    2494           0 : static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
    2495             :                                                struct list_head *head,
    2496             :                                                struct sk_buff *skb)
    2497             : {
    2498           0 :         if (unlikely(gro_recursion_inc_test(skb))) {
    2499           0 :                 NAPI_GRO_CB(skb)->flush |= 1;
    2500           0 :                 return NULL;
    2501             :         }
    2502             : 
    2503           0 :         return cb(head, skb);
    2504             : }
    2505             : 
    2506             : typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
    2507             :                                             struct sk_buff *);
    2508           0 : static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
    2509             :                                                   struct sock *sk,
    2510             :                                                   struct list_head *head,
    2511             :                                                   struct sk_buff *skb)
    2512             : {
    2513           0 :         if (unlikely(gro_recursion_inc_test(skb))) {
    2514           0 :                 NAPI_GRO_CB(skb)->flush |= 1;
    2515           0 :                 return NULL;
    2516             :         }
    2517             : 
    2518           0 :         return cb(sk, head, skb);
    2519             : }
    2520             : 
    2521             : struct packet_type {
    2522             :         __be16                  type;   /* This is really htons(ether_type). */
    2523             :         bool                    ignore_outgoing;
    2524             :         struct net_device       *dev;   /* NULL is wildcarded here           */
    2525             :         int                     (*func) (struct sk_buff *,
    2526             :                                          struct net_device *,
    2527             :                                          struct packet_type *,
    2528             :                                          struct net_device *);
    2529             :         void                    (*list_func) (struct list_head *,
    2530             :                                               struct packet_type *,
    2531             :                                               struct net_device *);
    2532             :         bool                    (*id_match)(struct packet_type *ptype,
    2533             :                                             struct sock *sk);
    2534             :         void                    *af_packet_priv;
    2535             :         struct list_head        list;
    2536             : };
    2537             : 
    2538             : struct offload_callbacks {
    2539             :         struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
    2540             :                                                 netdev_features_t features);
    2541             :         struct sk_buff          *(*gro_receive)(struct list_head *head,
    2542             :                                                 struct sk_buff *skb);
    2543             :         int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
    2544             : };
    2545             : 
    2546             : struct packet_offload {
    2547             :         __be16                   type;  /* This is really htons(ether_type). */
    2548             :         u16                      priority;
    2549             :         struct offload_callbacks callbacks;
    2550             :         struct list_head         list;
    2551             : };
    2552             : 
    2553             : /* often modified stats are per-CPU, others are shared (netdev->stats) */
    2554             : struct pcpu_sw_netstats {
    2555             :         u64     rx_packets;
    2556             :         u64     rx_bytes;
    2557             :         u64     tx_packets;
    2558             :         u64     tx_bytes;
    2559             :         struct u64_stats_sync   syncp;
    2560             : } __aligned(4 * sizeof(u64));
    2561             : 
    2562             : struct pcpu_lstats {
    2563             :         u64_stats_t packets;
    2564             :         u64_stats_t bytes;
    2565             :         struct u64_stats_sync syncp;
    2566             : } __aligned(2 * sizeof(u64));
    2567             : 
    2568             : void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
    2569             : 
    2570             : static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
    2571             : {
    2572             :         struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
    2573             : 
    2574             :         u64_stats_update_begin(&tstats->syncp);
    2575             :         tstats->rx_bytes += len;
    2576             :         tstats->rx_packets++;
    2577             :         u64_stats_update_end(&tstats->syncp);
    2578             : }
    2579             : 
    2580             : static inline void dev_sw_netstats_tx_add(struct net_device *dev,
    2581             :                                           unsigned int packets,
    2582             :                                           unsigned int len)
    2583             : {
    2584             :         struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
    2585             : 
    2586             :         u64_stats_update_begin(&tstats->syncp);
    2587             :         tstats->tx_bytes += len;
    2588             :         tstats->tx_packets += packets;
    2589             :         u64_stats_update_end(&tstats->syncp);
    2590             : }
    2591             : 
    2592           0 : static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
    2593             : {
    2594           0 :         struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
    2595             : 
    2596           0 :         u64_stats_update_begin(&lstats->syncp);
    2597           0 :         u64_stats_add(&lstats->bytes, len);
    2598           0 :         u64_stats_inc(&lstats->packets);
    2599           0 :         u64_stats_update_end(&lstats->syncp);
    2600           0 : }
    2601             : 
    2602             : #define __netdev_alloc_pcpu_stats(type, gfp)                            \
    2603             : ({                                                                      \
    2604             :         typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
    2605             :         if (pcpu_stats) {                                               \
    2606             :                 int __cpu;                                              \
    2607             :                 for_each_possible_cpu(__cpu) {                          \
    2608             :                         typeof(type) *stat;                             \
    2609             :                         stat = per_cpu_ptr(pcpu_stats, __cpu);          \
    2610             :                         u64_stats_init(&stat->syncp);                    \
    2611             :                 }                                                       \
    2612             :         }                                                               \
    2613             :         pcpu_stats;                                                     \
    2614             : })
    2615             : 
    2616             : #define netdev_alloc_pcpu_stats(type)                                   \
    2617             :         __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
    2618             : 
    2619             : #define devm_netdev_alloc_pcpu_stats(dev, type)                         \
    2620             : ({                                                                      \
    2621             :         typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
    2622             :         if (pcpu_stats) {                                               \
    2623             :                 int __cpu;                                              \
    2624             :                 for_each_possible_cpu(__cpu) {                          \
    2625             :                         typeof(type) *stat;                             \
    2626             :                         stat = per_cpu_ptr(pcpu_stats, __cpu);          \
    2627             :                         u64_stats_init(&stat->syncp);                    \
    2628             :                 }                                                       \
    2629             :         }                                                               \
    2630             :         pcpu_stats;                                                     \
    2631             : })
    2632             : 
    2633             : enum netdev_lag_tx_type {
    2634             :         NETDEV_LAG_TX_TYPE_UNKNOWN,
    2635             :         NETDEV_LAG_TX_TYPE_RANDOM,
    2636             :         NETDEV_LAG_TX_TYPE_BROADCAST,
    2637             :         NETDEV_LAG_TX_TYPE_ROUNDROBIN,
    2638             :         NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
    2639             :         NETDEV_LAG_TX_TYPE_HASH,
    2640             : };
    2641             : 
    2642             : enum netdev_lag_hash {
    2643             :         NETDEV_LAG_HASH_NONE,
    2644             :         NETDEV_LAG_HASH_L2,
    2645             :         NETDEV_LAG_HASH_L34,
    2646             :         NETDEV_LAG_HASH_L23,
    2647             :         NETDEV_LAG_HASH_E23,
    2648             :         NETDEV_LAG_HASH_E34,
    2649             :         NETDEV_LAG_HASH_VLAN_SRCMAC,
    2650             :         NETDEV_LAG_HASH_UNKNOWN,
    2651             : };
    2652             : 
    2653             : struct netdev_lag_upper_info {
    2654             :         enum netdev_lag_tx_type tx_type;
    2655             :         enum netdev_lag_hash hash_type;
    2656             : };
    2657             : 
    2658             : struct netdev_lag_lower_state_info {
    2659             :         u8 link_up : 1,
    2660             :            tx_enabled : 1;
    2661             : };
    2662             : 
    2663             : #include <linux/notifier.h>
    2664             : 
    2665             : /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
    2666             :  * and the rtnetlink notification exclusion list in rtnetlink_event() when
    2667             :  * adding new types.
    2668             :  */
    2669             : enum netdev_cmd {
    2670             :         NETDEV_UP       = 1,    /* For now you can't veto a device up/down */
    2671             :         NETDEV_DOWN,
    2672             :         NETDEV_REBOOT,          /* Tell a protocol stack a network interface
    2673             :                                    detected a hardware crash and restarted
    2674             :                                    - we can use this e.g. to kick tcp sessions
    2675             :                                    once done */
    2676             :         NETDEV_CHANGE,          /* Notify device state change */
    2677             :         NETDEV_REGISTER,
    2678             :         NETDEV_UNREGISTER,
    2679             :         NETDEV_CHANGEMTU,       /* notify after mtu change happened */
    2680             :         NETDEV_CHANGEADDR,      /* notify after the address change */
    2681             :         NETDEV_PRE_CHANGEADDR,  /* notify before the address change */
    2682             :         NETDEV_GOING_DOWN,
    2683             :         NETDEV_CHANGENAME,
    2684             :         NETDEV_FEAT_CHANGE,
    2685             :         NETDEV_BONDING_FAILOVER,
    2686             :         NETDEV_PRE_UP,
    2687             :         NETDEV_PRE_TYPE_CHANGE,
    2688             :         NETDEV_POST_TYPE_CHANGE,
    2689             :         NETDEV_POST_INIT,
    2690             :         NETDEV_RELEASE,
    2691             :         NETDEV_NOTIFY_PEERS,
    2692             :         NETDEV_JOIN,
    2693             :         NETDEV_CHANGEUPPER,
    2694             :         NETDEV_RESEND_IGMP,
     2695             :         NETDEV_PRECHANGEMTU,    /* notify before the MTU changes */
    2696             :         NETDEV_CHANGEINFODATA,
    2697             :         NETDEV_BONDING_INFO,
    2698             :         NETDEV_PRECHANGEUPPER,
    2699             :         NETDEV_CHANGELOWERSTATE,
    2700             :         NETDEV_UDP_TUNNEL_PUSH_INFO,
    2701             :         NETDEV_UDP_TUNNEL_DROP_INFO,
    2702             :         NETDEV_CHANGE_TX_QUEUE_LEN,
    2703             :         NETDEV_CVLAN_FILTER_PUSH_INFO,
    2704             :         NETDEV_CVLAN_FILTER_DROP_INFO,
    2705             :         NETDEV_SVLAN_FILTER_PUSH_INFO,
    2706             :         NETDEV_SVLAN_FILTER_DROP_INFO,
    2707             : };
    2708             : const char *netdev_cmd_to_name(enum netdev_cmd cmd);
    2709             : 
    2710             : int register_netdevice_notifier(struct notifier_block *nb);
    2711             : int unregister_netdevice_notifier(struct notifier_block *nb);
    2712             : int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
    2713             : int unregister_netdevice_notifier_net(struct net *net,
    2714             :                                       struct notifier_block *nb);
    2715             : int register_netdevice_notifier_dev_net(struct net_device *dev,
    2716             :                                         struct notifier_block *nb,
    2717             :                                         struct netdev_net_notifier *nn);
    2718             : int unregister_netdevice_notifier_dev_net(struct net_device *dev,
    2719             :                                           struct notifier_block *nb,
    2720             :                                           struct netdev_net_notifier *nn);
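                      :
                      : /* Illustrative sketch, not part of this header: a minimal notifier
                      :  * using the registration API above.  The names example_netdev_event
                      :  * and example_nb are hypothetical.
                      :  */
                      : static int example_netdev_event(struct notifier_block *nb,
                      :                                 unsigned long val, void *ptr)
                      : {
                      :         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
                      :
                      :         if (val == NETDEV_UP)
                      :                 netdev_info(dev, "interface is up\n");
                      :         return NOTIFY_DONE;
                      : }
                      :
                      : static struct notifier_block example_nb = {
                      :         .notifier_call = example_netdev_event,
                      : };
                      :
                      : /* register_netdevice_notifier(&example_nb) would also replay
                      :  * NETDEV_REGISTER and NETDEV_UP for already-registered devices.
                      :  */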
    2721             : 
    2722             : struct netdev_notifier_info {
    2723             :         struct net_device       *dev;
    2724             :         struct netlink_ext_ack  *extack;
    2725             : };
    2726             : 
    2727             : struct netdev_notifier_info_ext {
    2728             :         struct netdev_notifier_info info; /* must be first */
    2729             :         union {
    2730             :                 u32 mtu;
    2731             :         } ext;
    2732             : };
    2733             : 
    2734             : struct netdev_notifier_change_info {
    2735             :         struct netdev_notifier_info info; /* must be first */
    2736             :         unsigned int flags_changed;
    2737             : };
    2738             : 
    2739             : struct netdev_notifier_changeupper_info {
    2740             :         struct netdev_notifier_info info; /* must be first */
    2741             :         struct net_device *upper_dev; /* new upper dev */
    2742             :         bool master; /* is upper dev master */
    2743             :         bool linking; /* is the notification for link or unlink */
    2744             :         void *upper_info; /* upper dev info */
    2745             : };
    2746             : 
    2747             : struct netdev_notifier_changelowerstate_info {
    2748             :         struct netdev_notifier_info info; /* must be first */
     2749             :         void *lower_state_info; /* lower dev state */
    2750             : };
    2751             : 
    2752             : struct netdev_notifier_pre_changeaddr_info {
    2753             :         struct netdev_notifier_info info; /* must be first */
    2754             :         const unsigned char *dev_addr;
    2755             : };
    2756             : 
    2757             : static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
    2758             :                                              struct net_device *dev)
    2759             : {
    2760             :         info->dev = dev;
    2761             :         info->extack = NULL;
    2762             : }
    2763             : 
    2764             : static inline struct net_device *
    2765          48 : netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
    2766             : {
    2767          48 :         return info->dev;
    2768             : }
    2769             : 
    2770             : static inline struct netlink_ext_ack *
    2771             : netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
    2772             : {
    2773             :         return info->extack;
    2774             : }
    2775             : 
    2776             : int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
    2777             : 
    2778             : 
    2779             : extern rwlock_t                         dev_base_lock;          /* Device list lock */
    2780             : 
    2781             : #define for_each_netdev(net, d)         \
    2782             :                 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
    2783             : #define for_each_netdev_reverse(net, d) \
    2784             :                 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
    2785             : #define for_each_netdev_rcu(net, d)             \
    2786             :                 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
    2787             : #define for_each_netdev_safe(net, d, n) \
    2788             :                 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
    2789             : #define for_each_netdev_continue(net, d)                \
    2790             :                 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
    2791             : #define for_each_netdev_continue_reverse(net, d)                \
    2792             :                 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
    2793             :                                                      dev_list)
    2794             : #define for_each_netdev_continue_rcu(net, d)            \
    2795             :         list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
    2796             : #define for_each_netdev_in_bond_rcu(bond, slave)        \
    2797             :                 for_each_netdev_rcu(&init_net, slave)       \
    2798             :                         if (netdev_master_upper_dev_get_rcu(slave) == (bond))
    2799             : #define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)
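                      :
                      : /* Illustrative sketch, not part of this header: walking one
                      :  * namespace's device list under RCU with the iterator above.  Plain
                      :  * for_each_netdev() instead requires RTNL or dev_base_lock.
                      :  * example_dump_netdevs() is a hypothetical name.
                      :  */
                      : static inline void example_dump_netdevs(struct net *net)
                      : {
                      :         struct net_device *dev;
                      :
                      :         rcu_read_lock();
                      :         for_each_netdev_rcu(net, dev)
                      :                 pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
                      :         rcu_read_unlock();
                      : }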
    2800             : 
    2801             : static inline struct net_device *next_net_device(struct net_device *dev)
    2802             : {
    2803             :         struct list_head *lh;
    2804             :         struct net *net;
    2805             : 
    2806             :         net = dev_net(dev);
    2807             :         lh = dev->dev_list.next;
    2808             :         return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
    2809             : }
    2810             : 
    2811           0 : static inline struct net_device *next_net_device_rcu(struct net_device *dev)
    2812             : {
    2813           0 :         struct list_head *lh;
    2814           0 :         struct net *net;
    2815             : 
    2816           0 :         net = dev_net(dev);
    2817           0 :         lh = rcu_dereference(list_next_rcu(&dev->dev_list));
    2818           0 :         return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
    2819             : }
    2820             : 
    2821             : static inline struct net_device *first_net_device(struct net *net)
    2822             : {
    2823             :         return list_empty(&net->dev_base_head) ? NULL :
    2824             :                 net_device_entry(net->dev_base_head.next);
    2825             : }
    2826             : 
    2827             : static inline struct net_device *first_net_device_rcu(struct net *net)
    2828             : {
    2829             :         struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
    2830             : 
    2831             :         return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
    2832             : }
    2833             : 
    2834             : int netdev_boot_setup_check(struct net_device *dev);
    2835             : unsigned long netdev_boot_base(const char *prefix, int unit);
    2836             : struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
    2837             :                                        const char *hwaddr);
    2838             : struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
    2839             : void dev_add_pack(struct packet_type *pt);
    2840             : void dev_remove_pack(struct packet_type *pt);
    2841             : void __dev_remove_pack(struct packet_type *pt);
    2842             : void dev_add_offload(struct packet_offload *po);
    2843             : void dev_remove_offload(struct packet_offload *po);
    2844             : 
    2845             : int dev_get_iflink(const struct net_device *dev);
    2846             : int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
    2847             : struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
    2848             :                                       unsigned short mask);
    2849             : struct net_device *dev_get_by_name(struct net *net, const char *name);
    2850             : struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
    2851             : struct net_device *__dev_get_by_name(struct net *net, const char *name);
    2852             : int dev_alloc_name(struct net_device *dev, const char *name);
    2853             : int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
    2854             : void dev_close(struct net_device *dev);
    2855             : void dev_close_many(struct list_head *head, bool unlink);
    2856             : void dev_disable_lro(struct net_device *dev);
    2857             : int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
    2858             : u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
    2859             :                      struct net_device *sb_dev);
    2860             : u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
    2861             :                        struct net_device *sb_dev);
    2862             : 
    2863             : int dev_queue_xmit(struct sk_buff *skb);
    2864             : int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
    2865             : int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
    2866             : 
    2867           0 : static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
    2868             : {
    2869           0 :         int ret;
    2870             : 
    2871           0 :         ret = __dev_direct_xmit(skb, queue_id);
    2872           0 :         if (!dev_xmit_complete(ret))
    2873           0 :                 kfree_skb(skb);
    2874           0 :         return ret;
    2875             : }
    2876             : 
    2877             : int register_netdevice(struct net_device *dev);
    2878             : void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
    2879             : void unregister_netdevice_many(struct list_head *head);
    2880           0 : static inline void unregister_netdevice(struct net_device *dev)
    2881             : {
    2882           0 :         unregister_netdevice_queue(dev, NULL);
    2883           0 : }
    2884             : 
    2885             : int netdev_refcnt_read(const struct net_device *dev);
    2886             : void free_netdev(struct net_device *dev);
    2887             : void netdev_freemem(struct net_device *dev);
    2888             : int init_dummy_netdev(struct net_device *dev);
    2889             : 
    2890             : struct net_device *netdev_get_xmit_slave(struct net_device *dev,
    2891             :                                          struct sk_buff *skb,
    2892             :                                          bool all_slaves);
    2893             : struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
    2894             :                                             struct sock *sk);
    2895             : struct net_device *dev_get_by_index(struct net *net, int ifindex);
    2896             : struct net_device *__dev_get_by_index(struct net *net, int ifindex);
    2897             : struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
    2898             : struct net_device *dev_get_by_napi_id(unsigned int napi_id);
    2899             : int netdev_get_name(struct net *net, char *name, int ifindex);
    2900             : int dev_restart(struct net_device *dev);
    2901             : int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
    2902             : int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
    2903             : 
    2904        4994 : static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
    2905             : {
    2906        3580 :         return NAPI_GRO_CB(skb)->data_offset;
    2907             : }
    2908             : 
    2909        3378 : static inline unsigned int skb_gro_len(const struct sk_buff *skb)
    2910             : {
    2911        2671 :         return skb->len - NAPI_GRO_CB(skb)->data_offset;
    2912             : }
    2913             : 
    2914        1412 : static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
    2915             : {
    2916        1412 :         NAPI_GRO_CB(skb)->data_offset += len;
    2917             : }
    2918             : 
    2919        1428 : static inline void *skb_gro_header_fast(struct sk_buff *skb,
    2920             :                                         unsigned int offset)
    2921             : {
    2922        1428 :         return NAPI_GRO_CB(skb)->frag0 + offset;
    2923             : }
    2924             : 
    2925        2133 : static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
    2926             : {
    2927        2133 :         return NAPI_GRO_CB(skb)->frag0_len < hlen;
    2928             : }
    2929             : 
    2930        2133 : static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
    2931             : {
    2932        2133 :         NAPI_GRO_CB(skb)->frag0 = NULL;
    2933        2133 :         NAPI_GRO_CB(skb)->frag0_len = 0;
    2934             : }
    2935             : 
    2936        2133 : static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
    2937             :                                         unsigned int offset)
    2938             : {
    2939        2133 :         if (!pskb_may_pull(skb, hlen))
    2940             :                 return NULL;
    2941             : 
    2942        2133 :         skb_gro_frag0_invalidate(skb);
    2943        2133 :         return skb->data + offset;
    2944             : }
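                      :
                      : /* The three helpers above are typically combined in a gro_receive
                      :  * callback as follows (sketch; 'th' and its type are illustrative):
                      :  *
                      :  *      off  = skb_gro_offset(skb);
                      :  *      hlen = off + sizeof(*th);
                      :  *      th   = skb_gro_header_fast(skb, off);
                      :  *      if (skb_gro_header_hard(skb, hlen)) {
                      :  *              th = skb_gro_header_slow(skb, hlen, off);
                      :  *              if (unlikely(!th))
                      :  *                      goto out;
                      :  *      }
                      :  */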
    2945             : 
    2946         707 : static inline void *skb_gro_network_header(struct sk_buff *skb)
    2947             : {
    2948         707 :         return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
    2949         705 :                skb_network_offset(skb);
    2950             : }
    2951             : 
    2952           0 : static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
    2953             :                                         const void *start, unsigned int len)
    2954             : {
    2955           0 :         if (NAPI_GRO_CB(skb)->csum_valid)
    2956           0 :                 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
    2957             :                                                   csum_partial(start, len, 0));
    2958           0 : }
    2959             : 
    2960             : /* GRO checksum functions. These are logical equivalents of the normal
    2961             :  * checksum functions (in skbuff.h) except that they operate on the GRO
    2962             :  * offsets and fields in sk_buff.
    2963             :  */
    2964             : 
    2965             : __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
    2966             : 
    2967         707 : static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
    2968             : {
    2969         707 :         return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
    2970             : }
    2971             : 
    2972         707 : static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
    2973             :                                                       bool zero_okay,
    2974             :                                                       __sum16 check)
    2975             : {
    2976         707 :         return ((skb->ip_summed != CHECKSUM_PARTIAL ||
    2977           0 :                 skb_checksum_start_offset(skb) <
    2978           0 :                  skb_gro_offset(skb)) &&
    2979         707 :                 !skb_at_gro_remcsum_start(skb) &&
    2980        1414 :                 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
    2981         707 :                 (!zero_okay || check));
    2982             : }
    2983             : 
    2984         707 : static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
    2985             :                                                            __wsum psum)
    2986             : {
    2987         707 :         if (NAPI_GRO_CB(skb)->csum_valid &&
    2988           0 :             !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
    2989             :                 return 0;
    2990             : 
    2991         707 :         NAPI_GRO_CB(skb)->csum = psum;
    2992             : 
    2993         707 :         return __skb_gro_checksum_complete(skb);
    2994             : }
    2995             : 
    2996         707 : static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
    2997             : {
    2998         707 :         if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
    2999             :                 /* Consume a checksum from CHECKSUM_UNNECESSARY */
    3000           0 :                 NAPI_GRO_CB(skb)->csum_cnt--;
    3001             :         } else {
    3002             :                 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
    3003             :                  * verified a new top level checksum or an encapsulated one
     3004             :                  * during GRO. This saves work if we fall back to the normal path.
    3005             :                  */
    3006         707 :                 __skb_incr_checksum_unnecessary(skb);
    3007             :         }
    3008         707 : }
    3009             : 
    3010             : #define __skb_gro_checksum_validate(skb, proto, zero_okay, check,       \
    3011             :                                     compute_pseudo)                     \
    3012             : ({                                                                      \
    3013             :         __sum16 __ret = 0;                                              \
    3014             :         if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))  \
    3015             :                 __ret = __skb_gro_checksum_validate_complete(skb,       \
    3016             :                                 compute_pseudo(skb, proto));            \
    3017             :         if (!__ret)                                                     \
    3018             :                 skb_gro_incr_csum_unnecessary(skb);                     \
    3019             :         __ret;                                                          \
    3020             : })
    3021             : 
    3022             : #define skb_gro_checksum_validate(skb, proto, compute_pseudo)           \
    3023             :         __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
    3024             : 
    3025             : #define skb_gro_checksum_validate_zero_check(skb, proto, check,         \
    3026             :                                              compute_pseudo)            \
    3027             :         __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
    3028             : 
    3029             : #define skb_gro_checksum_simple_validate(skb)                           \
    3030             :         __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
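                      :
                      : /* Typical use in a protocol's gro_receive handler (sketch; the IPv4
                      :  * TCP case with inet_gro_compute_pseudo is shown for illustration):
                      :  *
                      :  *      if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
                      :  *                                    inet_gro_compute_pseudo))
                      :  *              goto flush;
                      :  */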
    3031             : 
    3032           2 : static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
    3033             : {
    3034           2 :         return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
    3035             :                 !NAPI_GRO_CB(skb)->csum_valid);
    3036             : }
    3037             : 
    3038           0 : static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
    3039             :                                               __wsum pseudo)
    3040             : {
    3041           0 :         NAPI_GRO_CB(skb)->csum = ~pseudo;
    3042           0 :         NAPI_GRO_CB(skb)->csum_valid = 1;
    3043           0 : }
    3044             : 
    3045             : #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)        \
    3046             : do {                                                                    \
    3047             :         if (__skb_gro_checksum_convert_check(skb))                      \
    3048             :                 __skb_gro_checksum_convert(skb,                         \
    3049             :                                            compute_pseudo(skb, proto)); \
    3050             : } while (0)
    3051             : 
    3052             : struct gro_remcsum {
    3053             :         int offset;
    3054             :         __wsum delta;
    3055             : };
    3056             : 
    3057             : static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
    3058             : {
    3059             :         grc->offset = 0;
    3060             :         grc->delta = 0;
    3061             : }
    3062             : 
    3063             : static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
    3064             :                                             unsigned int off, size_t hdrlen,
    3065             :                                             int start, int offset,
    3066             :                                             struct gro_remcsum *grc,
    3067             :                                             bool nopartial)
    3068             : {
    3069             :         __wsum delta;
    3070             :         size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
    3071             : 
    3072             :         BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
    3073             : 
    3074             :         if (!nopartial) {
    3075             :                 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
    3076             :                 return ptr;
    3077             :         }
    3078             : 
    3079             :         ptr = skb_gro_header_fast(skb, off);
    3080             :         if (skb_gro_header_hard(skb, off + plen)) {
    3081             :                 ptr = skb_gro_header_slow(skb, off + plen, off);
    3082             :                 if (!ptr)
    3083             :                         return NULL;
    3084             :         }
    3085             : 
    3086             :         delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
    3087             :                                start, offset);
    3088             : 
    3089             :         /* Adjust skb->csum since we changed the packet */
    3090             :         NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
    3091             : 
    3092             :         grc->offset = off + hdrlen + offset;
    3093             :         grc->delta = delta;
    3094             : 
    3095             :         return ptr;
    3096             : }
    3097             : 
    3098             : static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
    3099             :                                            struct gro_remcsum *grc)
    3100             : {
    3101             :         void *ptr;
    3102             :         size_t plen = grc->offset + sizeof(u16);
    3103             : 
    3104             :         if (!grc->delta)
    3105             :                 return;
    3106             : 
    3107             :         ptr = skb_gro_header_fast(skb, grc->offset);
    3108             :         if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
    3109             :                 ptr = skb_gro_header_slow(skb, plen, grc->offset);
    3110             :                 if (!ptr)
    3111             :                         return;
    3112             :         }
    3113             : 
    3114             :         remcsum_unadjust((__sum16 *)ptr, grc->delta);
    3115             : }
    3116             : 
    3117             : #ifdef CONFIG_XFRM_OFFLOAD
    3118             : static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
    3119             : {
    3120             :         if (PTR_ERR(pp) != -EINPROGRESS)
    3121             :                 NAPI_GRO_CB(skb)->flush |= flush;
    3122             : }
    3123             : static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
    3124             :                                                struct sk_buff *pp,
    3125             :                                                int flush,
    3126             :                                                struct gro_remcsum *grc)
    3127             : {
    3128             :         if (PTR_ERR(pp) != -EINPROGRESS) {
    3129             :                 NAPI_GRO_CB(skb)->flush |= flush;
    3130             :                 skb_gro_remcsum_cleanup(skb, grc);
    3131             :                 skb->remcsum_offload = 0;
    3132             :         }
    3133             : }
    3134             : #else
    3135         723 : static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
    3136             : {
    3137         723 :         NAPI_GRO_CB(skb)->flush |= flush;
    3138             : }
    3139             : static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
    3140             :                                                struct sk_buff *pp,
    3141             :                                                int flush,
    3142             :                                                struct gro_remcsum *grc)
    3143             : {
    3144             :         NAPI_GRO_CB(skb)->flush |= flush;
    3145             :         skb_gro_remcsum_cleanup(skb, grc);
    3146             :         skb->remcsum_offload = 0;
    3147             : }
    3148             : #endif
    3149             : 
    3150          10 : static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
    3151             :                                   unsigned short type,
    3152             :                                   const void *daddr, const void *saddr,
    3153             :                                   unsigned int len)
    3154             : {
    3155          10 :         if (!dev->header_ops || !dev->header_ops->create)
    3156             :                 return 0;
    3157             : 
    3158          10 :         return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
    3159             : }
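                      :
                      : /* Sketch: pushing a link-layer header through the device's header_ops
                      :  * (dest_mac is illustrative; for Ethernet, ->create is eth_header()):
                      :  *
                      :  *      dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr,
                      :  *                      skb->len);
                      :  */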
    3160             : 
    3161           2 : static inline int dev_parse_header(const struct sk_buff *skb,
    3162             :                                    unsigned char *haddr)
    3163             : {
    3164           2 :         const struct net_device *dev = skb->dev;
    3165             : 
    3166           2 :         if (!dev->header_ops || !dev->header_ops->parse)
    3167             :                 return 0;
    3168           2 :         return dev->header_ops->parse(skb, haddr);
    3169             : }
    3170             : 
    3171           2 : static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
    3172             : {
    3173           2 :         const struct net_device *dev = skb->dev;
    3174             : 
    3175           2 :         if (!dev->header_ops || !dev->header_ops->parse_protocol)
    3176             :                 return 0;
    3177           2 :         return dev->header_ops->parse_protocol(skb);
    3178             : }
    3179             : 
    3180             : /* ll_header must have at least hard_header_len allocated */
    3181           2 : static inline bool dev_validate_header(const struct net_device *dev,
    3182             :                                        char *ll_header, int len)
    3183             : {
    3184           2 :         if (likely(len >= dev->hard_header_len))
    3185             :                 return true;
    3186           0 :         if (len < dev->min_header_len)
    3187             :                 return false;
    3188             : 
    3189           0 :         if (capable(CAP_SYS_RAWIO)) {
    3190           0 :                 memset(ll_header + len, 0, dev->hard_header_len - len);
    3191           0 :                 return true;
    3192             :         }
    3193             : 
    3194           0 :         if (dev->header_ops && dev->header_ops->validate)
    3195           0 :                 return dev->header_ops->validate(ll_header, len);
    3196             : 
    3197             :         return false;
    3198             : }
    3199             : 
    3200         908 : static inline bool dev_has_header(const struct net_device *dev)
    3201             : {
    3202         908 :         return dev->header_ops && dev->header_ops->create;
    3203             : }
    3204             : 
    3205             : typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
    3206             :                            int len, int size);
    3207             : int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
    3208             : static inline int unregister_gifconf(unsigned int family)
    3209             : {
    3210             :         return register_gifconf(family, NULL);
    3211             : }
    3212             : 
    3213             : #ifdef CONFIG_NET_FLOW_LIMIT
     3214             : #define FLOW_LIMIT_HISTORY      (1 << 7)  /* must be a power of 2 and not overflow the u8 buckets */
    3215             : struct sd_flow_limit {
    3216             :         u64                     count;
    3217             :         unsigned int            num_buckets;
    3218             :         unsigned int            history_head;
    3219             :         u16                     history[FLOW_LIMIT_HISTORY];
    3220             :         u8                      buckets[];
    3221             : };
    3222             : 
    3223             : extern int netdev_flow_limit_table_len;
    3224             : #endif /* CONFIG_NET_FLOW_LIMIT */
    3225             : 
    3226             : /*
    3227             :  * Incoming packets are placed on per-CPU queues
    3228             :  */
    3229             : struct softnet_data {
    3230             :         struct list_head        poll_list;
    3231             :         struct sk_buff_head     process_queue;
    3232             : 
    3233             :         /* stats */
    3234             :         unsigned int            processed;
    3235             :         unsigned int            time_squeeze;
    3236             :         unsigned int            received_rps;
    3237             : #ifdef CONFIG_RPS
    3238             :         struct softnet_data     *rps_ipi_list;
    3239             : #endif
    3240             : #ifdef CONFIG_NET_FLOW_LIMIT
    3241             :         struct sd_flow_limit __rcu *flow_limit;
    3242             : #endif
    3243             :         struct Qdisc            *output_queue;
    3244             :         struct Qdisc            **output_queue_tailp;
    3245             :         struct sk_buff          *completion_queue;
    3246             : #ifdef CONFIG_XFRM_OFFLOAD
    3247             :         struct sk_buff_head     xfrm_backlog;
    3248             : #endif
    3249             :         /* written and read only by owning cpu: */
    3250             :         struct {
    3251             :                 u16 recursion;
    3252             :                 u8  more;
    3253             :         } xmit;
    3254             : #ifdef CONFIG_RPS
    3255             :         /* input_queue_head should be written by cpu owning this struct,
    3256             :          * and only read by other cpus. Worth using a cache line.
    3257             :          */
    3258             :         unsigned int            input_queue_head ____cacheline_aligned_in_smp;
    3259             : 
    3260             :         /* Elements below can be accessed between CPUs for RPS/RFS */
    3261             :         call_single_data_t      csd ____cacheline_aligned_in_smp;
    3262             :         struct softnet_data     *rps_ipi_next;
    3263             :         unsigned int            cpu;
    3264             :         unsigned int            input_queue_tail;
    3265             : #endif
    3266             :         unsigned int            dropped;
    3267             :         struct sk_buff_head     input_pkt_queue;
    3268             :         struct napi_struct      backlog;
    3269             : 
    3270             : };
    3271             : 
    3272           0 : static inline void input_queue_head_incr(struct softnet_data *sd)
    3273             : {
    3274             : #ifdef CONFIG_RPS
    3275           0 :         sd->input_queue_head++;
    3276             : #endif
    3277           0 : }
    3278             : 
    3279           0 : static inline void input_queue_tail_incr_save(struct softnet_data *sd,
    3280             :                                               unsigned int *qtail)
    3281             : {
    3282             : #ifdef CONFIG_RPS
    3283           0 :         *qtail = ++sd->input_queue_tail;
    3284             : #endif
    3285             : }
    3286             : 
    3287             : DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
    3288             : 
    3289           0 : static inline int dev_recursion_level(void)
    3290             : {
    3291           0 :         return this_cpu_read(softnet_data.xmit.recursion);
    3292             : }
    3293             : 
    3294             : #define XMIT_RECURSION_LIMIT    8
    3295           0 : static inline bool dev_xmit_recursion(void)
    3296             : {
    3297           0 :         return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
    3298             :                         XMIT_RECURSION_LIMIT);
    3299             : }
    3300             : 
    3301           0 : static inline void dev_xmit_recursion_inc(void)
    3302             : {
    3303           0 :         __this_cpu_inc(softnet_data.xmit.recursion);
    3304             : }
    3305             : 
    3306           0 : static inline void dev_xmit_recursion_dec(void)
    3307             : {
    3308           0 :         __this_cpu_dec(softnet_data.xmit.recursion);
    3309           0 : }
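                      :
                      : /* Sketch of how the recursion counter bounds nested transmits, as
                      :  * redirect-style paths do (error handling elided):
                      :  *
                      :  *      if (dev_xmit_recursion())
                      :  *              goto drop;
                      :  *      dev_xmit_recursion_inc();
                      :  *      ret = dev_queue_xmit(skb);
                      :  *      dev_xmit_recursion_dec();
                      :  */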
    3310             : 
    3311             : void __netif_schedule(struct Qdisc *q);
    3312             : void netif_schedule_queue(struct netdev_queue *txq);
    3313             : 
    3314             : static inline void netif_tx_schedule_all(struct net_device *dev)
    3315             : {
    3316             :         unsigned int i;
    3317             : 
    3318             :         for (i = 0; i < dev->num_tx_queues; i++)
    3319             :                 netif_schedule_queue(netdev_get_tx_queue(dev, i));
    3320             : }
    3321             : 
    3322           0 : static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
    3323             : {
    3324           0 :         clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
    3325             : }
    3326             : 
    3327             : /**
    3328             :  *      netif_start_queue - allow transmit
    3329             :  *      @dev: network device
    3330             :  *
    3331             :  *      Allow upper layers to call the device hard_start_xmit routine.
    3332             :  */
    3333             : static inline void netif_start_queue(struct net_device *dev)
    3334             : {
    3335             :         netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
    3336             : }
    3337             : 
    3338             : static inline void netif_tx_start_all_queues(struct net_device *dev)
    3339             : {
    3340             :         unsigned int i;
    3341             : 
    3342             :         for (i = 0; i < dev->num_tx_queues; i++) {
    3343             :                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
    3344             :                 netif_tx_start_queue(txq);
    3345             :         }
    3346             : }
    3347             : 
    3348             : void netif_tx_wake_queue(struct netdev_queue *dev_queue);
    3349             : 
    3350             : /**
    3351             :  *      netif_wake_queue - restart transmit
    3352             :  *      @dev: network device
    3353             :  *
    3354             :  *      Allow upper layers to call the device hard_start_xmit routine.
    3355             :  *      Used for flow control when transmit resources are available.
    3356             :  */
    3357             : static inline void netif_wake_queue(struct net_device *dev)
    3358             : {
    3359             :         netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
    3360             : }
    3361             : 
    3362           1 : static inline void netif_tx_wake_all_queues(struct net_device *dev)
    3363             : {
    3364           1 :         unsigned int i;
    3365             : 
    3366           2 :         for (i = 0; i < dev->num_tx_queues; i++) {
    3367           1 :                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
    3368           1 :                 netif_tx_wake_queue(txq);
    3369             :         }
    3370           1 : }
    3371             : 
    3372           0 : static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
    3373             : {
    3374           0 :         set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
    3375             : }
    3376             : 
    3377             : /**
     3378             :  *      netif_stop_queue - stop transmit queue
    3379             :  *      @dev: network device
    3380             :  *
     3381             :  *      Stop upper layers from calling the device hard_start_xmit routine.
    3382             :  *      Used for flow control when transmit resources are unavailable.
    3383             :  */
    3384             : static inline void netif_stop_queue(struct net_device *dev)
    3385             : {
    3386             :         netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
    3387             : }
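                      :
                      : /* The classic driver flow-control pattern built from these helpers
                      :  * (sketch; the ring-state checks are hypothetical):
                      :  *
                      :  *      ndo_start_xmit():
                      :  *              if (tx_ring_nearly_full(ring))
                      :  *                      netif_stop_queue(dev);
                      :  *      TX completion path:
                      :  *              if (netif_queue_stopped(dev) && tx_ring_has_room(ring))
                      :  *                      netif_wake_queue(dev);
                      :  */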
    3388             : 
    3389             : void netif_tx_stop_all_queues(struct net_device *dev);
    3390             : 
    3391         448 : static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
    3392             : {
    3393         448 :         return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
    3394             : }
    3395             : 
    3396             : /**
     3397             :  *      netif_queue_stopped - test if transmit queue is flow blocked
    3398             :  *      @dev: network device
    3399             :  *
    3400             :  *      Test if transmit queue on device is currently unable to send.
    3401             :  */
    3402             : static inline bool netif_queue_stopped(const struct net_device *dev)
    3403             : {
    3404             :         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
    3405             : }
    3406             : 
    3407           0 : static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
    3408             : {
    3409           0 :         return dev_queue->state & QUEUE_STATE_ANY_XOFF;
    3410             : }
    3411             : 
    3412             : static inline bool
    3413        1344 : netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
    3414             : {
    3415        1344 :         return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
    3416             : }
    3417             : 
    3418             : static inline bool
    3419           0 : netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
    3420             : {
    3421           0 :         return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
    3422             : }
    3423             : 
    3424             : /**
    3425             :  *      netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
    3426             :  *      @dev_queue: pointer to transmit queue
    3427             :  *
     3428             :  * BQL-enabled drivers might use this helper in their ndo_start_xmit()
     3429             :  * to give an appropriate prefetch hint to the CPU.
    3430             :  */
    3431             : static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
    3432             : {
    3433             : #ifdef CONFIG_BQL
    3434             :         prefetchw(&dev_queue->dql.num_queued);
    3435             : #endif
    3436             : }
    3437             : 
    3438             : /**
    3439             :  *      netdev_txq_bql_complete_prefetchw - prefetch bql data for write
    3440             :  *      @dev_queue: pointer to transmit queue
    3441             :  *
     3442             :  * BQL-enabled drivers might use this helper in their TX completion path
     3443             :  * to give an appropriate prefetch hint to the CPU.
    3444             :  */
    3445             : static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
    3446             : {
    3447             : #ifdef CONFIG_BQL
    3448             :         prefetchw(&dev_queue->dql.limit);
    3449             : #endif
    3450             : }
    3451             : 
    3452             : static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
    3453             :                                         unsigned int bytes)
    3454             : {
    3455             : #ifdef CONFIG_BQL
    3456             :         dql_queued(&dev_queue->dql, bytes);
    3457             : 
    3458             :         if (likely(dql_avail(&dev_queue->dql) >= 0))
    3459             :                 return;
    3460             : 
    3461             :         set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
    3462             : 
    3463             :         /*
    3464             :          * The XOFF flag must be set before checking the dql_avail below,
    3465             :          * because in netdev_tx_completed_queue we update the dql_completed
    3466             :          * before checking the XOFF flag.
    3467             :          */
    3468             :         smp_mb();
    3469             : 
    3470             :         /* check again in case another CPU has just made room avail */
    3471             :         if (unlikely(dql_avail(&dev_queue->dql) >= 0))
    3472             :                 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
    3473             : #endif
    3474             : }
    3475             : 
    3476             : /* Variant of netdev_tx_sent_queue() for drivers that are aware
    3477             :  * that they should not test BQL status themselves.
    3478             :  * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
    3479             :  * skb of a batch.
    3480             :  * Returns true if the doorbell must be used to kick the NIC.
    3481             :  */
    3482             : static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
    3483             :                                           unsigned int bytes,
    3484             :                                           bool xmit_more)
    3485             : {
    3486             :         if (xmit_more) {
    3487             : #ifdef CONFIG_BQL
    3488             :                 dql_queued(&dev_queue->dql, bytes);
    3489             : #endif
    3490             :                 return netif_tx_queue_stopped(dev_queue);
    3491             :         }
    3492             :         netdev_tx_sent_queue(dev_queue, bytes);
    3493             :         return true;
    3494             : }
    3495             : 
    3496             : /**
    3497             :  *      netdev_sent_queue - report the number of bytes queued to hardware
    3498             :  *      @dev: network device
    3499             :  *      @bytes: number of bytes queued to the hardware device queue
    3500             :  *
    3501             :  *      Report the number of bytes queued for sending/completion to the network
     3502             :  *      device hardware queue. @bytes should be a good approximation and must
     3503             :  *      exactly match the @bytes later passed to netdev_completed_queue().
    3504             :  */
    3505             : static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
    3506             : {
    3507             :         netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
    3508             : }
    3509             : 
    3510             : static inline bool __netdev_sent_queue(struct net_device *dev,
    3511             :                                        unsigned int bytes,
    3512             :                                        bool xmit_more)
    3513             : {
    3514             :         return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
    3515             :                                       xmit_more);
    3516             : }
    3517             : 
    3518             : static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
    3519             :                                              unsigned int pkts, unsigned int bytes)
    3520             : {
    3521             : #ifdef CONFIG_BQL
    3522             :         if (unlikely(!bytes))
    3523             :                 return;
    3524             : 
    3525             :         dql_completed(&dev_queue->dql, bytes);
    3526             : 
    3527             :         /*
     3528             :          * Without the memory barrier there is a small possibility that
    3529             :          * netdev_tx_sent_queue will miss the update and cause the queue to
    3530             :          * be stopped forever
    3531             :          */
    3532             :         smp_mb();
    3533             : 
    3534             :         if (unlikely(dql_avail(&dev_queue->dql) < 0))
    3535             :                 return;
    3536             : 
    3537             :         if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
    3538             :                 netif_schedule_queue(dev_queue);
    3539             : #endif
    3540             : }
    3541             : 
    3542             : /**
    3543             :  *      netdev_completed_queue - report bytes and packets completed by device
    3544             :  *      @dev: network device
    3545             :  *      @pkts: actual number of packets sent over the medium
    3546             :  *      @bytes: actual number of bytes sent over the medium
    3547             :  *
    3548             :  *      Report the number of bytes and packets transmitted by the network device
     3549             :  *      hardware queue over the physical medium; @bytes must exactly match the
     3550             :  *      @bytes amount passed to netdev_sent_queue().
    3551             :  */
    3552             : static inline void netdev_completed_queue(struct net_device *dev,
    3553             :                                           unsigned int pkts, unsigned int bytes)
    3554             : {
    3555             :         netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
    3556             : }
    3557             : 
    3558             : static inline void netdev_tx_reset_queue(struct netdev_queue *q)
    3559             : {
    3560             : #ifdef CONFIG_BQL
    3561             :         clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
    3562             :         dql_reset(&q->dql);
    3563             : #endif
    3564             : }
    3565             : 
    3566             : /**
     3567             :  *      netdev_reset_queue - reset the packet and byte counts of a network device
    3568             :  *      @dev_queue: network device
    3569             :  *
    3570             :  *      Reset the bytes and packet count of a network device and clear the
    3571             :  *      software flow control OFF bit for this network device
    3572             :  */
    3573             : static inline void netdev_reset_queue(struct net_device *dev_queue)
    3574             : {
    3575             :         netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
    3576             : }
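                      :
                      : /* BQL lifecycle in a driver at a glance (sketch; ring handling is
                      :  * hypothetical):
                      :  *
                      :  *      ndo_start_xmit():    netdev_tx_sent_queue(txq, skb->len);
                      :  *      TX completion:       netdev_tx_completed_queue(txq, pkts, bytes);
                      :  *      ring reset/teardown: netdev_tx_reset_queue(txq);
                      :  *
                      :  * The bytes reported at completion must match what was reported at
                      :  * send time, or the dynamic queue limit estimate drifts.
                      :  */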
    3577             : 
    3578             : /**
    3579             :  *      netdev_cap_txqueue - check if selected tx queue exceeds device queues
    3580             :  *      @dev: network device
    3581             :  *      @queue_index: given tx queue index
    3582             :  *
     3583             :  *      Returns 0 if the given tx queue index >= number of device tx queues;
    3584             :  *      otherwise returns the originally passed tx queue index.
    3585             :  */
    3586           0 : static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
    3587             : {
    3588           0 :         if (unlikely(queue_index >= dev->real_num_tx_queues)) {
    3589           0 :                 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
    3590             :                                      dev->name, queue_index,
    3591             :                                      dev->real_num_tx_queues);
    3592           0 :                 return 0;
    3593             :         }
    3594             : 
    3595             :         return queue_index;
    3596             : }
    3597             : 
    3598             : /**
    3599             :  *      netif_running - test if up
    3600             :  *      @dev: network device
    3601             :  *
    3602             :  *      Test if the device has been brought up.
    3603             :  */
    3604          37 : static inline bool netif_running(const struct net_device *dev)
    3605             : {
    3606          37 :         return test_bit(__LINK_STATE_START, &dev->state);
    3607             : }
    3608             : 
    3609             : /*
    3610             :  * Routines to manage the subqueues on a device.  We only need start,
    3611             :  * stop, and a check if it's stopped.  All other device management is
    3612             :  * done at the overall netdevice level.
     3613             :  * We also need to be able to test whether the device is multiqueue.
    3614             :  */
    3615             : 
    3616             : /**
    3617             :  *      netif_start_subqueue - allow sending packets on subqueue
    3618             :  *      @dev: network device
    3619             :  *      @queue_index: sub queue index
    3620             :  *
    3621             :  * Start individual transmit queue of a device with multiple transmit queues.
    3622             :  */
    3623           0 : static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
    3624             : {
    3625           0 :         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
    3626             : 
    3627           0 :         netif_tx_start_queue(txq);
    3628             : }
    3629             : 
    3630             : /**
    3631             :  *      netif_stop_subqueue - stop sending packets on subqueue
    3632             :  *      @dev: network device
    3633             :  *      @queue_index: sub queue index
    3634             :  *
    3635             :  * Stop individual transmit queue of a device with multiple transmit queues.
    3636             :  */
    3637           0 : static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
    3638             : {
    3639           0 :         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
    3640           0 :         netif_tx_stop_queue(txq);
    3641             : }
    3642             : 
    3643             : /**
    3644             :  *      __netif_subqueue_stopped - test status of subqueue
    3645             :  *      @dev: network device
    3646             :  *      @queue_index: sub queue index
    3647             :  *
    3648             :  * Check individual transmit queue of a device with multiple transmit queues.
    3649             :  */
    3650             : static inline bool __netif_subqueue_stopped(const struct net_device *dev,
    3651             :                                             u16 queue_index)
    3652             : {
    3653             :         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
    3654             : 
    3655             :         return netif_tx_queue_stopped(txq);
    3656             : }
    3657             : 
    3658             : /**
    3659             :  *      netif_subqueue_stopped - test status of subqueue
    3660             :  *      @dev: network device
    3661             :  *      @skb: sub queue buffer pointer
    3662             :  *
    3663             :  * Check individual transmit queue of a device with multiple transmit queues.
    3664             :  */
    3665             : static inline bool netif_subqueue_stopped(const struct net_device *dev,
    3666             :                                           struct sk_buff *skb)
    3667             : {
    3668             :         return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
    3669             : }
    3670             : 
    3671             : /**
    3672             :  *      netif_wake_subqueue - allow sending packets on subqueue
    3673             :  *      @dev: network device
    3674             :  *      @queue_index: sub queue index
    3675             :  *
    3676             :  * Resume individual transmit queue of a device with multiple transmit queues.
    3677             :  */
    3678           0 : static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
    3679             : {
    3680           0 :         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
    3681             : 
    3682           0 :         netif_tx_wake_queue(txq);
    3683           0 : }
    3684             : 
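                      : /* Example usage (illustrative sketch, not from the original header): a
                      :  * multiqueue driver typically pairs these helpers to flow-control one
                      :  * ring; my_ring_full(), my_ring_space() and MY_WAKE_THRESHOLD are
                      :  * hypothetical:
                      :  *
                      :  *      u16 q = skb_get_queue_mapping(skb);
                      :  *
                      :  *      // in .ndo_start_xmit(), after posting the skb:
                      :  *      if (my_ring_full(priv, q))
                      :  *              netif_stop_subqueue(dev, q);
                      :  *
                      :  *      // in the TX completion handler, once descriptors are reclaimed:
                      :  *      if (__netif_subqueue_stopped(dev, q) &&
                      :  *          my_ring_space(priv, q) > MY_WAKE_THRESHOLD)
                      :  *              netif_wake_subqueue(dev, q);
                      :  */
                      : 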
    3685             : #ifdef CONFIG_XPS
    3686             : int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
    3687             :                         u16 index);
    3688             : int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
    3689             :                           u16 index, bool is_rxqs_map);
    3690             : 
    3691             : /**
    3692             :  *      netif_attr_test_mask - Test whether a CPU or Rx queue is set in a mask
    3693             :  *      @j: CPU/Rx queue index
    3694             :  *      @mask: bitmask of all CPUs/Rx queues
    3695             :  *      @nr_bits: number of bits in the bitmask
    3696             :  *
    3697             :  * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
    3698             :  */
    3699           8 : static inline bool netif_attr_test_mask(unsigned long j,
    3700             :                                         const unsigned long *mask,
    3701             :                                         unsigned int nr_bits)
    3702             : {
    3703           8 :         cpu_max_bits_warn(j, nr_bits);
    3704           8 :         return test_bit(j, mask);
    3705             : }
    3706             : 
    3707             : /**
    3708             :  *      netif_attr_test_online - Test for online CPU/Rx queue
    3709             :  *      @j: CPU/Rx queue index
    3710             :  *      @online_mask: bitmask for CPUs/Rx queues that are online
    3711             :  *      @nr_bits: number of bits in the bitmask
    3712             :  *
    3713             :  * Returns true if a CPU/Rx queue is online.
    3714             :  */
    3715           8 : static inline bool netif_attr_test_online(unsigned long j,
    3716             :                                           const unsigned long *online_mask,
    3717             :                                           unsigned int nr_bits)
    3718             : {
    3719           8 :         cpu_max_bits_warn(j, nr_bits);
    3720             : 
    3721           8 :         if (online_mask)
    3722           8 :                 return test_bit(j, online_mask);
    3723             : 
    3724           0 :         return (j < nr_bits);
    3725             : }
    3726             : 
    3727             : /**
    3728             :  *      netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queues mask
    3729             :  *      @n: CPU/Rx queue index
    3730             :  *      @srcp: the cpumask/Rx queue mask pointer
    3731             :  *      @nr_bits: number of bits in the bitmask
    3732             :  *
    3733             :  * Returns >= nr_bits if no further CPUs/Rx queues are set.
    3734             :  */
    3735          10 : static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
    3736             :                                                unsigned int nr_bits)
    3737             : {
    3738             :         /* -1 is a legal arg here. */
    3739          10 :         if (n != -1)
    3740          10 :                 cpu_max_bits_warn(n, nr_bits);
    3741             : 
    3742          10 :         if (srcp)
    3743          10 :                 return find_next_bit(srcp, nr_bits, n + 1);
    3744             : 
    3745           0 :         return n + 1;
    3746             : }
    3747             : 
    3748             : /**
    3749             :  *      netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
    3750             :  *      @n: CPU/Rx queue index
    3751             :  *      @src1p: the first CPUs/Rx queues mask pointer
    3752             :  *      @src2p: the second CPUs/Rx queues mask pointer
    3753             :  *      @nr_bits: number of bits in the bitmask
    3754             :  *
    3755             :  * Returns >= nr_bits if no further CPUs/Rx queues are set in both masks.
    3756             :  */
    3757           5 : static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
    3758             :                                           const unsigned long *src2p,
    3759             :                                           unsigned int nr_bits)
    3760             : {
    3761             :         /* -1 is a legal arg here. */
    3762           5 :         if (n != -1)
    3763           5 :                 cpu_max_bits_warn(n, nr_bits);
    3764             : 
    3765           5 :         if (src1p && src2p)
    3766           5 :                 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
    3767           0 :         else if (src1p)
    3768           0 :                 return find_next_bit(src1p, nr_bits, n + 1);
    3769           0 :         else if (src2p)
    3770           0 :                 return find_next_bit(src2p, nr_bits, n + 1);
    3771             : 
    3772           0 :         return n + 1;
    3773             : }
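                      : 
                      : /* Example usage (illustrative sketch): walking every bit set in the
                      :  * intersection of two attribute masks, e.g. an XPS map AND the online
                      :  * mask; starting from -1 is legal, as noted above, and use_queue() is
                      :  * a placeholder:
                      :  *
                      :  *      int j = -1;
                      :  *
                      :  *      while ((j = netif_attrmask_next_and(j, mask, online_mask,
                      :  *                                          nr_bits)) < nr_bits)
                      :  *              use_queue(j);
                      :  */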
    3774             : #else
    3775             : static inline int netif_set_xps_queue(struct net_device *dev,
    3776             :                                       const struct cpumask *mask,
    3777             :                                       u16 index)
    3778             : {
    3779             :         return 0;
    3780             : }
    3781             : 
    3782             : static inline int __netif_set_xps_queue(struct net_device *dev,
    3783             :                                         const unsigned long *mask,
    3784             :                                         u16 index, bool is_rxqs_map)
    3785             : {
    3786             :         return 0;
    3787             : }
    3788             : #endif
    3789             : 
    3790             : /**
    3791             :  *      netif_is_multiqueue - test if device has multiple transmit queues
    3792             :  *      @dev: network device
    3793             :  *
    3794             :  * Check if device has multiple transmit queues
    3795             :  */
    3796           6 : static inline bool netif_is_multiqueue(const struct net_device *dev)
    3797             : {
    3798           6 :         return dev->num_tx_queues > 1;
    3799             : }
    3800             : 
    3801             : int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
    3802             : 
    3803             : #ifdef CONFIG_SYSFS
    3804             : int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
    3805             : #else
    3806             : static inline int netif_set_real_num_rx_queues(struct net_device *dev,
    3807             :                                                 unsigned int rxqs)
    3808             : {
    3809             :         dev->real_num_rx_queues = rxqs;
    3810             :         return 0;
    3811             : }
    3812             : #endif
    3813             : 
    3814             : static inline struct netdev_rx_queue *
    3815             : __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
    3816             : {
    3817             :         return dev->_rx + rxq;
    3818             : }
    3819             : 
    3820             : #ifdef CONFIG_SYSFS
    3821           0 : static inline unsigned int get_netdev_rx_queue_index(
    3822             :                 struct netdev_rx_queue *queue)
    3823             : {
    3824           0 :         struct net_device *dev = queue->dev;
    3825           0 :         int index = queue - dev->_rx;
    3826             : 
    3827           0 :         BUG_ON(index >= dev->num_rx_queues);
    3828           0 :         return index;
    3829             : }
    3830             : #endif
    3831             : 
    3832             : #define DEFAULT_MAX_NUM_RSS_QUEUES      (8)
    3833             : int netif_get_num_default_rss_queues(void);
    3834             : 
    3835             : enum skb_free_reason {
    3836             :         SKB_REASON_CONSUMED,
    3837             :         SKB_REASON_DROPPED,
    3838             : };
    3839             : 
    3840             : void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
    3841             : void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
    3842             : 
    3843             : /*
    3844             :  * It is not allowed to call kfree_skb() or consume_skb() from hardware
    3845             :  * interrupt context or with hardware interrupts being disabled.
    3846             :  * (in_irq() || irqs_disabled())
    3847             :  *
    3848             :  * We provide four helpers that can be used in the following contexts:
    3849             :  *
    3850             :  * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
    3851             :  *  replacing kfree_skb(skb)
    3852             :  *
    3853             :  * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
    3854             :  *  Typically used in place of consume_skb(skb) in the TX completion path
    3855             :  *
    3856             :  * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
    3857             :  *  replacing kfree_skb(skb)
    3858             :  *
    3859             :  * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
    3860             :  *  and has consumed a packet. Used in place of consume_skb(skb)
    3861             :  */
    3862           0 : static inline void dev_kfree_skb_irq(struct sk_buff *skb)
    3863             : {
    3864           0 :         __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
    3865             : }
    3866             : 
    3867             : static inline void dev_consume_skb_irq(struct sk_buff *skb)
    3868             : {
    3869             :         __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
    3870             : }
    3871             : 
    3872           0 : static inline void dev_kfree_skb_any(struct sk_buff *skb)
    3873             : {
    3874           0 :         __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
    3875             : }
    3876             : 
    3877          35 : static inline void dev_consume_skb_any(struct sk_buff *skb)
    3878             : {
    3879          35 :         __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
    3880             : }
    3881             : 
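                      : /* Example usage (illustrative sketch): picking the right helper in a TX
                      :  * completion handler that may run in hard-IRQ context; my_tx_ok() is
                      :  * hypothetical:
                      :  *
                      :  *      if (my_tx_ok(desc))
                      :  *              dev_consume_skb_any(skb);       // delivered, not a drop
                      :  *      else
                      :  *              dev_kfree_skb_any(skb);         // accounted as a drop
                      :  */
                      : 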
    3882             : void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
    3883             : int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
    3884             : int netif_rx(struct sk_buff *skb);
    3885             : int netif_rx_ni(struct sk_buff *skb);
    3886             : int netif_rx_any_context(struct sk_buff *skb);
    3887             : int netif_receive_skb(struct sk_buff *skb);
    3888             : int netif_receive_skb_core(struct sk_buff *skb);
    3889             : void netif_receive_skb_list(struct list_head *head);
    3890             : gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
    3891             : void napi_gro_flush(struct napi_struct *napi, bool flush_old);
    3892             : struct sk_buff *napi_get_frags(struct napi_struct *napi);
    3893             : gro_result_t napi_gro_frags(struct napi_struct *napi);
    3894             : struct packet_offload *gro_find_receive_by_type(__be16 type);
    3895             : struct packet_offload *gro_find_complete_by_type(__be16 type);
    3896             : 
    3897           0 : static inline void napi_free_frags(struct napi_struct *napi)
    3898             : {
    3899           0 :         kfree_skb(napi->skb);
    3900           0 :         napi->skb = NULL;
    3901             : }
    3902             : 
    3903             : bool netdev_is_rx_handler_busy(struct net_device *dev);
    3904             : int netdev_rx_handler_register(struct net_device *dev,
    3905             :                                rx_handler_func_t *rx_handler,
    3906             :                                void *rx_handler_data);
    3907             : void netdev_rx_handler_unregister(struct net_device *dev);
    3908             : 
    3909             : bool dev_valid_name(const char *name);
    3910             : int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
    3911             :                 bool *need_copyout);
    3912             : int dev_ifconf(struct net *net, struct ifconf *, int);
    3913             : int dev_ethtool(struct net *net, struct ifreq *);
    3914             : unsigned int dev_get_flags(const struct net_device *);
    3915             : int __dev_change_flags(struct net_device *dev, unsigned int flags,
    3916             :                        struct netlink_ext_ack *extack);
    3917             : int dev_change_flags(struct net_device *dev, unsigned int flags,
    3918             :                      struct netlink_ext_ack *extack);
    3919             : void __dev_notify_flags(struct net_device *, unsigned int old_flags,
    3920             :                         unsigned int gchanges);
    3921             : int dev_change_name(struct net_device *, const char *);
    3922             : int dev_set_alias(struct net_device *, const char *, size_t);
    3923             : int dev_get_alias(const struct net_device *, char *, size_t);
    3924             : int dev_change_net_namespace(struct net_device *, struct net *, const char *);
    3925             : int __dev_set_mtu(struct net_device *, int);
    3926             : int dev_validate_mtu(struct net_device *dev, int mtu,
    3927             :                      struct netlink_ext_ack *extack);
    3928             : int dev_set_mtu_ext(struct net_device *dev, int mtu,
    3929             :                     struct netlink_ext_ack *extack);
    3930             : int dev_set_mtu(struct net_device *, int);
    3931             : int dev_change_tx_queue_len(struct net_device *, unsigned long);
    3932             : void dev_set_group(struct net_device *, int);
    3933             : int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
    3934             :                               struct netlink_ext_ack *extack);
    3935             : int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
    3936             :                         struct netlink_ext_ack *extack);
    3937             : int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
    3938             :                              struct netlink_ext_ack *extack);
    3939             : int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
    3940             : int dev_change_carrier(struct net_device *, bool new_carrier);
    3941             : int dev_get_phys_port_id(struct net_device *dev,
    3942             :                          struct netdev_phys_item_id *ppid);
    3943             : int dev_get_phys_port_name(struct net_device *dev,
    3944             :                            char *name, size_t len);
    3945             : int dev_get_port_parent_id(struct net_device *dev,
    3946             :                            struct netdev_phys_item_id *ppid, bool recurse);
    3947             : bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
    3948             : int dev_change_proto_down(struct net_device *dev, bool proto_down);
    3949             : int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
    3950             : void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
    3951             :                                   u32 value);
    3952             : struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
    3953             : struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
    3954             :                                     struct netdev_queue *txq, int *ret);
    3955             : 
    3956             : typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
    3957             : int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
    3958             :                       int fd, int expected_fd, u32 flags);
    3959             : int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
    3960             : u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
    3961             : 
    3962             : int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
    3963             : int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
    3964             : int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
    3965             : bool is_skb_forwardable(const struct net_device *dev,
    3966             :                         const struct sk_buff *skb);
    3967             : 
    3968           0 : static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
    3969             :                                                  const struct sk_buff *skb,
    3970             :                                                  const bool check_mtu)
    3971             : {
    3972           0 :         const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
    3973           0 :         unsigned int len;
    3974             : 
    3975           0 :         if (!(dev->flags & IFF_UP))
    3976             :                 return false;
    3977             : 
    3978           0 :         if (!check_mtu)
    3979             :                 return true;
    3980             : 
    3981           0 :         len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
    3982           0 :         if (skb->len <= len)
    3983             :                 return true;
    3984             : 
    3985             :         /* If the skb is GSO, we don't care about the length: the packet
    3986             :          * can still be segmented to fit before it is actually transmitted.
    3987             :          */
    3988           0 :         if (skb_is_gso(skb))
    3989           0 :                 return true;
    3990             : 
    3991             :         return false;
    3992             : }
    3993             : 
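                      : /* Worked example: for a standard Ethernet device (mtu == 1500,
                      :  * hard_header_len == 14), the length check above accepts frames of up
                      :  * to 1500 + 14 + 4 = 1518 bytes, i.e. one VLAN tag of headroom.
                      :  */
                      : 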
    3994           0 : static __always_inline int ____dev_forward_skb(struct net_device *dev,
    3995             :                                                struct sk_buff *skb,
    3996             :                                                const bool check_mtu)
    3997             : {
    3998           0 :         if (skb_orphan_frags(skb, GFP_ATOMIC) ||
    3999           0 :             unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
    4000           0 :                 atomic_long_inc(&dev->rx_dropped);
    4001           0 :                 kfree_skb(skb);
    4002           0 :                 return NET_RX_DROP;
    4003             :         }
    4004             : 
    4005           0 :         skb_scrub_packet(skb, true);
    4006           0 :         skb->priority = 0;
    4007           0 :         return 0;
    4008             : }
    4009             : 
    4010             : bool dev_nit_active(struct net_device *dev);
    4011             : void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
    4012             : 
    4013             : extern int              netdev_budget;
    4014             : extern unsigned int     netdev_budget_usecs;
    4015             : 
    4016             : /* Called by rtnetlink.c:rtnl_unlock() */
    4017             : void netdev_run_todo(void);
    4018             : 
    4019             : /**
    4020             :  *      dev_put - release reference to device
    4021             :  *      @dev: network device
    4022             :  *
    4023             :  * Release reference to device to allow it to be freed.
    4024             :  */
    4025          17 : static inline void dev_put(struct net_device *dev)
    4026             : {
    4027          17 :         this_cpu_dec(*dev->pcpu_refcnt);
    4028          13 : }
    4029             : 
    4030             : /**
    4031             :  *      dev_hold - get reference to device
    4032             :  *      @dev: network device
    4033             :  *
    4034             :  * Hold reference to device to keep it from being freed.
    4035             :  */
    4036          48 : static inline void dev_hold(struct net_device *dev)
    4037             : {
    4038          48 :         this_cpu_inc(*dev->pcpu_refcnt);
    4039          25 : }
    4040             : 
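                      : /* Example usage (illustrative sketch): hold a reference across any
                      :  * window in which the device could otherwise be freed, and balance
                      :  * every dev_hold() with exactly one dev_put():
                      :  *
                      :  *      dev_hold(dev);
                      :  *      ...                     // safe to use dev here
                      :  *      dev_put(dev);
                      :  */
                      : 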
    4041             : /* Carrier loss detection, dial on demand. The functions netif_carrier_on
    4042             :  * and _off may be called from IRQ context, but it is the caller
    4043             :  * who is responsible for serialization of these calls.
    4044             :  *
    4045             :  * The name carrier is inappropriate; these functions should really be
    4046             :  * called netif_lowerlayer_*() because they represent the state of any
    4047             :  * kind of lower layer not just hardware media.
    4048             :  */
    4049             : 
    4050             : void linkwatch_init_dev(struct net_device *dev);
    4051             : void linkwatch_fire_event(struct net_device *dev);
    4052             : void linkwatch_forget_dev(struct net_device *dev);
    4053             : 
    4054             : /**
    4055             :  *      netif_carrier_ok - test if carrier present
    4056             :  *      @dev: network device
    4057             :  *
    4058             :  * Check if carrier is present on device
    4059             :  */
    4060          41 : static inline bool netif_carrier_ok(const struct net_device *dev)
    4061             : {
    4062          41 :         return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
    4063             : }
    4064             : 
    4065             : unsigned long dev_trans_start(struct net_device *dev);
    4066             : 
    4067             : void __netdev_watchdog_up(struct net_device *dev);
    4068             : 
    4069             : void netif_carrier_on(struct net_device *dev);
    4070             : 
    4071             : void netif_carrier_off(struct net_device *dev);
    4072             : 
    4073             : /**
    4074             :  *      netif_dormant_on - mark device as dormant.
    4075             :  *      @dev: network device
    4076             :  *
    4077             :  * Mark device as dormant (as per RFC2863).
    4078             :  *
    4079             :  * The dormant state indicates that the relevant interface is not
    4080             :  * actually in a condition to pass packets (i.e., it is not 'up') but is
    4081             :  * in a "pending" state, waiting for some external event.  For "on-
    4082             :  * demand" interfaces, this new state identifies the situation where the
    4083             :  * interface is waiting for events to place it in the up state.
    4084             :  */
    4085           0 : static inline void netif_dormant_on(struct net_device *dev)
    4086             : {
    4087           0 :         if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
    4088           0 :                 linkwatch_fire_event(dev);
    4089           0 : }
    4090             : 
    4091             : /**
    4092             :  *      netif_dormant_off - set device as not dormant.
    4093             :  *      @dev: network device
    4094             :  *
    4095             :  * Mark device as no longer dormant.
    4096             :  */
    4097           0 : static inline void netif_dormant_off(struct net_device *dev)
    4098             : {
    4099           0 :         if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
    4100           0 :                 linkwatch_fire_event(dev);
    4101           0 : }
    4102             : 
    4103             : /**
    4104             :  *      netif_dormant - test if device is dormant
    4105             :  *      @dev: network device
    4106             :  *
    4107             :  * Check if device is dormant.
    4108             :  */
    4109          13 : static inline bool netif_dormant(const struct net_device *dev)
    4110             : {
    4111          13 :         return test_bit(__LINK_STATE_DORMANT, &dev->state);
    4112             : }
    4113             : 
    4114             : 
    4115             : /**
    4116             :  *      netif_testing_on - mark device as under test.
    4117             :  *      @dev: network device
    4118             :  *
    4119             :  * Mark device as under test (as per RFC2863).
    4120             :  *
    4121             :  * The testing state indicates that some test(s) must be performed on
    4122             :  * the interface. After completion of the test, the interface state
    4123             :  * will change to up, dormant, or down, as appropriate.
    4124             :  */
    4125           0 : static inline void netif_testing_on(struct net_device *dev)
    4126             : {
    4127           0 :         if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
    4128           0 :                 linkwatch_fire_event(dev);
    4129           0 : }
    4130             : 
    4131             : /**
    4132             :  *      netif_testing_off - set device as not under test.
    4133             :  *      @dev: network device
    4134             :  *
    4135             :  * Mark device as no longer under test.
    4136             :  */
    4137           0 : static inline void netif_testing_off(struct net_device *dev)
    4138             : {
    4139           0 :         if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
    4140           0 :                 linkwatch_fire_event(dev);
    4141           0 : }
    4142             : 
    4143             : /**
    4144             :  *      netif_testing - test if device is under test
    4145             :  *      @dev: network device
    4146             :  *
    4147             :  * Check if device is under test
    4148             :  */
    4149           4 : static inline bool netif_testing(const struct net_device *dev)
    4150             : {
    4151           4 :         return test_bit(__LINK_STATE_TESTING, &dev->state);
    4152             : }
    4153             : 
    4154             : 
    4155             : /**
    4156             :  *      netif_oper_up - test if device is operational
    4157             :  *      @dev: network device
    4158             :  *
    4159             :  * Check if carrier is operational
    4160             :  */
    4161          10 : static inline bool netif_oper_up(const struct net_device *dev)
    4162             : {
    4163          10 :         return (dev->operstate == IF_OPER_UP ||
    4164             :                 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
    4165             : }
    4166             : 
    4167             : /**
    4168             :  *      netif_device_present - is device available or removed
    4169             :  *      @dev: network device
    4170             :  *
    4171             :  * Check if device has not been removed from system.
    4172             :  */
    4173          14 : static inline bool netif_device_present(struct net_device *dev)
    4174             : {
    4175          14 :         return test_bit(__LINK_STATE_PRESENT, &dev->state);
    4176             : }
    4177             : 
    4178             : void netif_device_detach(struct net_device *dev);
    4179             : 
    4180             : void netif_device_attach(struct net_device *dev);
    4181             : 
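                      : /* Example usage (illustrative sketch): the classic suspend/resume
                      :  * pairing; my_suspend()/my_resume() are hypothetical driver callbacks
                      :  * and ndev is their net_device:
                      :  *
                      :  *      static int my_suspend(struct device *d)
                      :  *      {
                      :  *              netif_device_detach(ndev);  // mark absent, stop queues
                      :  *              ...                         // power the hardware down
                      :  *              return 0;
                      :  *      }
                      :  *
                      :  *      static int my_resume(struct device *d)
                      :  *      {
                      :  *              ...                         // power the hardware up
                      :  *              netif_device_attach(ndev);  // mark present, wake queues
                      :  *              return 0;
                      :  *      }
                      :  */
                      : 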
    4182             : /*
    4183             :  * Network interface message level settings
    4184             :  */
    4185             : 
    4186             : enum {
    4187             :         NETIF_MSG_DRV_BIT,
    4188             :         NETIF_MSG_PROBE_BIT,
    4189             :         NETIF_MSG_LINK_BIT,
    4190             :         NETIF_MSG_TIMER_BIT,
    4191             :         NETIF_MSG_IFDOWN_BIT,
    4192             :         NETIF_MSG_IFUP_BIT,
    4193             :         NETIF_MSG_RX_ERR_BIT,
    4194             :         NETIF_MSG_TX_ERR_BIT,
    4195             :         NETIF_MSG_TX_QUEUED_BIT,
    4196             :         NETIF_MSG_INTR_BIT,
    4197             :         NETIF_MSG_TX_DONE_BIT,
    4198             :         NETIF_MSG_RX_STATUS_BIT,
    4199             :         NETIF_MSG_PKTDATA_BIT,
    4200             :         NETIF_MSG_HW_BIT,
    4201             :         NETIF_MSG_WOL_BIT,
    4202             : 
    4203             :         /* When you add a new bit above, update netif_msg_class_names array
    4204             :          * in net/ethtool/common.c
    4205             :          */
    4206             :         NETIF_MSG_CLASS_COUNT,
    4207             : };
    4208             : /* Both ethtool_ops interface and internal driver implementation use u32 */
    4209             : static_assert(NETIF_MSG_CLASS_COUNT <= 32);
    4210             : 
    4211             : #define __NETIF_MSG_BIT(bit)    ((u32)1 << (bit))
    4212             : #define __NETIF_MSG(name)       __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
    4213             : 
    4214             : #define NETIF_MSG_DRV           __NETIF_MSG(DRV)
    4215             : #define NETIF_MSG_PROBE         __NETIF_MSG(PROBE)
    4216             : #define NETIF_MSG_LINK          __NETIF_MSG(LINK)
    4217             : #define NETIF_MSG_TIMER         __NETIF_MSG(TIMER)
    4218             : #define NETIF_MSG_IFDOWN        __NETIF_MSG(IFDOWN)
    4219             : #define NETIF_MSG_IFUP          __NETIF_MSG(IFUP)
    4220             : #define NETIF_MSG_RX_ERR        __NETIF_MSG(RX_ERR)
    4221             : #define NETIF_MSG_TX_ERR        __NETIF_MSG(TX_ERR)
    4222             : #define NETIF_MSG_TX_QUEUED     __NETIF_MSG(TX_QUEUED)
    4223             : #define NETIF_MSG_INTR          __NETIF_MSG(INTR)
    4224             : #define NETIF_MSG_TX_DONE       __NETIF_MSG(TX_DONE)
    4225             : #define NETIF_MSG_RX_STATUS     __NETIF_MSG(RX_STATUS)
    4226             : #define NETIF_MSG_PKTDATA       __NETIF_MSG(PKTDATA)
    4227             : #define NETIF_MSG_HW            __NETIF_MSG(HW)
    4228             : #define NETIF_MSG_WOL           __NETIF_MSG(WOL)
    4229             : 
    4230             : #define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
    4231             : #define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
    4232             : #define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
    4233             : #define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
    4234             : #define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
    4235             : #define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
    4236             : #define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
    4237             : #define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
    4238             : #define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
    4239             : #define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
    4240             : #define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
    4241             : #define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
    4242             : #define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
    4243             : #define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
    4244             : #define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)
    4245             : 
    4246             : static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
    4247             : {
    4248             :         /* use default */
    4249             :         if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
    4250             :                 return default_msg_enable_bits;
    4251             :         if (debug_value == 0)   /* no output */
    4252             :                 return 0;
    4253             :         /* set low N bits */
    4254             :         return (1U << debug_value) - 1;
    4255             : }
    4256             : 
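                      : /* Example usage (illustrative sketch): the usual probe-time pattern,
                      :  * where 'debug' is a module parameter and -1 selects the defaults:
                      :  *
                      :  *      priv->msg_enable = netif_msg_init(debug,
                      :  *                                        NETIF_MSG_DRV |
                      :  *                                        NETIF_MSG_PROBE |
                      :  *                                        NETIF_MSG_LINK);
                      :  *      if (netif_msg_probe(priv))
                      :  *              netdev_info(dev, "probing\n");
                      :  */
                      : 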
    4257         885 : static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
    4258             : {
    4259         885 :         spin_lock(&txq->_xmit_lock);
    4260         885 :         txq->xmit_lock_owner = cpu;
    4261         448 : }
    4262             : 
    4263             : static inline bool __netif_tx_acquire(struct netdev_queue *txq)
    4264             : {
    4265             :         __acquire(&txq->_xmit_lock);
    4266             :         return true;
    4267             : }
    4268             : 
    4269             : static inline void __netif_tx_release(struct netdev_queue *txq)
    4270             : {
    4271             :         __release(&txq->_xmit_lock);
    4272             : }
    4273             : 
    4274             : static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
    4275             : {
    4276             :         spin_lock_bh(&txq->_xmit_lock);
    4277             :         txq->xmit_lock_owner = smp_processor_id();
    4278             : }
    4279             : 
    4280         418 : static inline bool __netif_tx_trylock(struct netdev_queue *txq)
    4281             : {
    4282         418 :         bool ok = spin_trylock(&txq->_xmit_lock);
    4283         418 :         if (likely(ok))
    4284         413 :                 txq->xmit_lock_owner = smp_processor_id();
    4285         418 :         return ok;
    4286             : }
    4287             : 
    4288        1298 : static inline void __netif_tx_unlock(struct netdev_queue *txq)
    4289             : {
    4290        1298 :         txq->xmit_lock_owner = -1;
    4291        1298 :         spin_unlock(&txq->_xmit_lock);
    4292         861 : }
    4293             : 
    4294             : static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
    4295             : {
    4296             :         txq->xmit_lock_owner = -1;
    4297             :         spin_unlock_bh(&txq->_xmit_lock);
    4298             : }
    4299             : 
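                      : /* Example usage (illustrative sketch): per-queue locking from process
                      :  * context, serializing against concurrent transmit on that queue:
                      :  *
                      :  *      struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                      :  *
                      :  *      __netif_tx_lock_bh(txq);
                      :  *      ...                     // safely touch queue/ring state
                      :  *      __netif_tx_unlock_bh(txq);
                      :  */
                      : 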
    4300         448 : static inline void txq_trans_update(struct netdev_queue *txq)
    4301             : {
    4302         448 :         if (txq->xmit_lock_owner != -1)
    4303         448 :                 txq->trans_start = jiffies;
    4304             : }
    4305             : 
    4306             : /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
    4307           3 : static inline void netif_trans_update(struct net_device *dev)
    4308             : {
    4309           3 :         struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
    4310             : 
    4311           3 :         if (txq->trans_start != jiffies)
    4312           3 :                 txq->trans_start = jiffies;
    4313             : }
    4314             : 
    4315             : /**
    4316             :  *      netif_tx_lock - grab network device transmit lock
    4317             :  *      @dev: network device
    4318             :  *
    4319             :  * Get network device transmit lock
    4320             :  */
    4321           0 : static inline void netif_tx_lock(struct net_device *dev)
    4322             : {
    4323           0 :         unsigned int i;
    4324           0 :         int cpu;
    4325             : 
    4326           0 :         spin_lock(&dev->tx_global_lock);
    4327           0 :         cpu = smp_processor_id();
    4328           0 :         for (i = 0; i < dev->num_tx_queues; i++) {
    4329           0 :                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
    4330             : 
    4331             :                 /* We are the only thread of execution doing a
    4332             :                  * freeze, but we have to grab the _xmit_lock in
    4333             :                  * order to synchronize with threads which are in
    4334             :                  * the ->hard_start_xmit() handler and already
    4335             :                  * checked the frozen bit.
    4336             :                  */
    4337           0 :                 __netif_tx_lock(txq, cpu);
    4338           0 :                 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
    4339           0 :                 __netif_tx_unlock(txq);
    4340             :         }
    4341           0 : }
    4342             : 
    4343           0 : static inline void netif_tx_lock_bh(struct net_device *dev)
    4344             : {
    4345           0 :         local_bh_disable();
    4346           0 :         netif_tx_lock(dev);
    4347           0 : }
    4348             : 
    4349           0 : static inline void netif_tx_unlock(struct net_device *dev)
    4350             : {
    4351           0 :         unsigned int i;
    4352             : 
    4353           0 :         for (i = 0; i < dev->num_tx_queues; i++) {
    4354           0 :                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
    4355             : 
    4356             :                 /* No need to grab the _xmit_lock here.  If the
    4357             :                  * queue is not stopped for another reason, we
    4358             :                  * force a schedule.
    4359             :                  */
    4360           0 :                 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
    4361           0 :                 netif_schedule_queue(txq);
    4362             :         }
    4363           0 :         spin_unlock(&dev->tx_global_lock);
    4364           0 : }
    4365             : 
    4366           0 : static inline void netif_tx_unlock_bh(struct net_device *dev)
    4367             : {
    4368           0 :         netif_tx_unlock(dev);
    4369           0 :         local_bh_enable();
    4370           0 : }
    4371             : 
    4372             : #define HARD_TX_LOCK(dev, txq, cpu) {                   \
    4373             :         if ((dev->features & NETIF_F_LLTX) == 0) {       \
    4374             :                 __netif_tx_lock(txq, cpu);              \
    4375             :         } else {                                        \
    4376             :                 __netif_tx_acquire(txq);                \
    4377             :         }                                               \
    4378             : }
    4379             : 
    4380             : #define HARD_TX_TRYLOCK(dev, txq)                       \
    4381             :         (((dev->features & NETIF_F_LLTX) == 0) ? \
    4382             :                 __netif_tx_trylock(txq) :               \
    4383             :                 __netif_tx_acquire(txq))
    4384             : 
    4385             : #define HARD_TX_UNLOCK(dev, txq) {                      \
    4386             :         if ((dev->features & NETIF_F_LLTX) == 0) {       \
    4387             :                 __netif_tx_unlock(txq);                 \
    4388             :         } else {                                        \
    4389             :                 __netif_tx_release(txq);                \
    4390             :         }                                               \
    4391             : }
    4392             : 
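                      : /* Example usage (illustrative sketch): how a transmit path can wrap
                      :  * ->ndo_start_xmit() with these macros, so NETIF_F_LLTX drivers that
                      :  * do their own locking are not locked twice:
                      :  *
                      :  *      int rc;
                      :  *
                      :  *      HARD_TX_LOCK(dev, txq, smp_processor_id());
                      :  *      if (!netif_xmit_frozen_or_stopped(txq))
                      :  *              skb = dev_hard_start_xmit(skb, dev, txq, &rc);
                      :  *      HARD_TX_UNLOCK(dev, txq);
                      :  */
                      : 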
    4393           0 : static inline void netif_tx_disable(struct net_device *dev)
    4394             : {
    4395           0 :         unsigned int i;
    4396           0 :         int cpu;
    4397             : 
    4398           0 :         local_bh_disable();
    4399           0 :         cpu = smp_processor_id();
    4400           0 :         spin_lock(&dev->tx_global_lock);
    4401           0 :         for (i = 0; i < dev->num_tx_queues; i++) {
    4402           0 :                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
    4403             : 
    4404           0 :                 __netif_tx_lock(txq, cpu);
    4405           0 :                 netif_tx_stop_queue(txq);
    4406           0 :                 __netif_tx_unlock(txq);
    4407             :         }
    4408           0 :         spin_unlock(&dev->tx_global_lock);
    4409           0 :         local_bh_enable();
    4410           0 : }
    4411             : 
    4412           0 : static inline void netif_addr_lock(struct net_device *dev)
    4413             : {
    4414           0 :         unsigned char nest_level = 0;
    4415             : 
    4416             : #ifdef CONFIG_LOCKDEP
    4417           0 :         nest_level = dev->nested_level;
    4418             : #endif
    4419           0 :         spin_lock_nested(&dev->addr_list_lock, nest_level);
    4420             : }
    4421             : 
    4422           6 : static inline void netif_addr_lock_bh(struct net_device *dev)
    4423             : {
    4424           6 :         unsigned char nest_level = 0;
    4425             : 
    4426             : #ifdef CONFIG_LOCKDEP
    4427           6 :         nest_level = dev->nested_level;
    4428             : #endif
    4429           6 :         local_bh_disable();
    4430           6 :         spin_lock_nested(&dev->addr_list_lock, nest_level);
    4431           6 : }
    4432             : 
    4433           0 : static inline void netif_addr_unlock(struct net_device *dev)
    4434             : {
    4435           0 :         spin_unlock(&dev->addr_list_lock);
    4436             : }
    4437             : 
    4438           6 : static inline void netif_addr_unlock_bh(struct net_device *dev)
    4439             : {
    4440           6 :         spin_unlock_bh(&dev->addr_list_lock);
    4441           0 : }
    4442             : 
    4443             : /*
    4444             :  * dev_addrs walker. Should be used only for read access. Call with
    4445             :  * rcu_read_lock held.
    4446             :  */
    4447             : #define for_each_dev_addr(dev, ha) \
    4448             :                 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
    4449             : 
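                      : /* Example usage (illustrative sketch): a read-side walk of the device
                      :  * address list:
                      :  *
                      :  *      struct netdev_hw_addr *ha;
                      :  *
                      :  *      rcu_read_lock();
                      :  *      for_each_dev_addr(dev, ha)
                      :  *              pr_info("addr %pM\n", ha->addr);
                      :  *      rcu_read_unlock();
                      :  */
                      : 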
    4450             : /* These functions live elsewhere (drivers/net/net_init.c), but are related */
    4451             : 
    4452             : void ether_setup(struct net_device *dev);
    4453             : 
    4454             : /* Support for loadable net-drivers */
    4455             : struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
    4456             :                                     unsigned char name_assign_type,
    4457             :                                     void (*setup)(struct net_device *),
    4458             :                                     unsigned int txqs, unsigned int rxqs);
    4459             : #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
    4460             :         alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
    4461             : 
    4462             : #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
    4463             :         alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
    4464             :                          count)
    4465             : 
    4466             : int register_netdev(struct net_device *dev);
    4467             : void unregister_netdev(struct net_device *dev);
    4468             : 
    4469             : int devm_register_netdev(struct device *dev, struct net_device *ndev);
    4470             : 
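                      : /* Example usage (illustrative sketch): a minimal allocate/register/
                      :  * teardown sequence for an Ethernet-like device; struct my_priv is
                      :  * hypothetical and free_netdev() is declared elsewhere in this header:
                      :  *
                      :  *      struct net_device *dev;
                      :  *      int err;
                      :  *
                      :  *      dev = alloc_netdev(sizeof(struct my_priv), "my%d",
                      :  *                         NET_NAME_UNKNOWN, ether_setup);
                      :  *      if (!dev)
                      :  *              return -ENOMEM;
                      :  *      err = register_netdev(dev);     // takes the RTNL internally
                      :  *      if (err) {
                      :  *              free_netdev(dev);
                      :  *              return err;
                      :  *      }
                      :  *      ...
                      :  *      unregister_netdev(dev);
                      :  *      free_netdev(dev);
                      :  */
                      : 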
    4471             : /* General hardware address lists handling functions */
    4472             : int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
    4473             :                    struct netdev_hw_addr_list *from_list, int addr_len);
    4474             : void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
    4475             :                       struct netdev_hw_addr_list *from_list, int addr_len);
    4476             : int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
    4477             :                        struct net_device *dev,
    4478             :                        int (*sync)(struct net_device *, const unsigned char *),
    4479             :                        int (*unsync)(struct net_device *,
    4480             :                                      const unsigned char *));
    4481             : int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
    4482             :                            struct net_device *dev,
    4483             :                            int (*sync)(struct net_device *,
    4484             :                                        const unsigned char *, int),
    4485             :                            int (*unsync)(struct net_device *,
    4486             :                                          const unsigned char *, int));
    4487             : void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
    4488             :                               struct net_device *dev,
    4489             :                               int (*unsync)(struct net_device *,
    4490             :                                             const unsigned char *, int));
    4491             : void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
    4492             :                           struct net_device *dev,
    4493             :                           int (*unsync)(struct net_device *,
    4494             :                                         const unsigned char *));
    4495             : void __hw_addr_init(struct netdev_hw_addr_list *list);
    4496             : 
    4497             : /* Functions used for device addresses handling */
    4498             : int dev_addr_add(struct net_device *dev, const unsigned char *addr,
    4499             :                  unsigned char addr_type);
    4500             : int dev_addr_del(struct net_device *dev, const unsigned char *addr,
    4501             :                  unsigned char addr_type);
    4502             : void dev_addr_flush(struct net_device *dev);
    4503             : int dev_addr_init(struct net_device *dev);
    4504             : 
    4505             : /* Functions used for unicast addresses handling */
    4506             : int dev_uc_add(struct net_device *dev, const unsigned char *addr);
    4507             : int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
    4508             : int dev_uc_del(struct net_device *dev, const unsigned char *addr);
    4509             : int dev_uc_sync(struct net_device *to, struct net_device *from);
    4510             : int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
    4511             : void dev_uc_unsync(struct net_device *to, struct net_device *from);
    4512             : void dev_uc_flush(struct net_device *dev);
    4513             : void dev_uc_init(struct net_device *dev);
    4514             : 
    4515             : /**
    4516             :  *  __dev_uc_sync - Synchronize device's unicast list
    4517             :  *  @dev:  device to sync
    4518             :  *  @sync: function to call if address should be added
    4519             :  *  @unsync: function to call if address should be removed
    4520             :  *
    4521             :  *  Add newly added addresses to the interface, and release
    4522             :  *  addresses that have been deleted.
    4523             :  */
    4524             : static inline int __dev_uc_sync(struct net_device *dev,
    4525             :                                 int (*sync)(struct net_device *,
    4526             :                                             const unsigned char *),
    4527             :                                 int (*unsync)(struct net_device *,
    4528             :                                               const unsigned char *))
    4529             : {
    4530             :         return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
    4531             : }
    4532             : 
    4533             : /**
    4534             :  *  __dev_uc_unsync - Remove synchronized addresses from device
    4535             :  *  @dev:  device to sync
    4536             :  *  @unsync: function to call if address should be removed
    4537             :  *
    4538             :  *  Remove all addresses that were added to the device by dev_uc_sync().
    4539             :  */
    4540             : static inline void __dev_uc_unsync(struct net_device *dev,
    4541             :                                    int (*unsync)(struct net_device *,
    4542             :                                                  const unsigned char *))
    4543             : {
    4544             :         __hw_addr_unsync_dev(&dev->uc, dev, unsync);
    4545             : }
    4546             : 
    4547             : /* Functions used for multicast addresses handling */
    4548             : int dev_mc_add(struct net_device *dev, const unsigned char *addr);
    4549             : int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
    4550             : int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
    4551             : int dev_mc_del(struct net_device *dev, const unsigned char *addr);
    4552             : int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
    4553             : int dev_mc_sync(struct net_device *to, struct net_device *from);
    4554             : int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
    4555             : void dev_mc_unsync(struct net_device *to, struct net_device *from);
    4556             : void dev_mc_flush(struct net_device *dev);
    4557             : void dev_mc_init(struct net_device *dev);
    4558             : 
    4559             : /**
    4560             :  *  __dev_mc_sync - Synchronize device's multicast list
    4561             :  *  @dev:  device to sync
    4562             :  *  @sync: function to call if address should be added
    4563             :  *  @unsync: function to call if address should be removed
    4564             :  *
    4565             :  *  Add newly added addresses to the interface, and release
    4566             :  *  addresses that have been deleted.
    4567             :  */
    4568             : static inline int __dev_mc_sync(struct net_device *dev,
    4569             :                                 int (*sync)(struct net_device *,
    4570             :                                             const unsigned char *),
    4571             :                                 int (*unsync)(struct net_device *,
    4572             :                                               const unsigned char *))
    4573             : {
    4574             :         return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
    4575             : }
    4576             : 
    4577             : /**
    4578             :  *  __dev_mc_unsync - Remove synchronized addresses from device
    4579             :  *  @dev:  device to sync
    4580             :  *  @unsync: function to call if address should be removed
    4581             :  *
    4582             :  *  Remove all addresses that were added to the device by dev_mc_sync().
    4583             :  */
    4584             : static inline void __dev_mc_unsync(struct net_device *dev,
    4585             :                                    int (*unsync)(struct net_device *,
    4586             :                                                  const unsigned char *))
    4587             : {
    4588             :         __hw_addr_unsync_dev(&dev->mc, dev, unsync);
    4589             : }
    4590             : 
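                      : /* Example usage (illustrative sketch): a driver's .ndo_set_rx_mode()
                      :  * typically syncs both lists into its hardware filters;
                      :  * my_add_addr()/my_del_addr() are hypothetical callbacks with the
                      :  * signatures required above:
                      :  *
                      :  *      static void my_set_rx_mode(struct net_device *dev)
                      :  *      {
                      :  *              __dev_uc_sync(dev, my_add_addr, my_del_addr);
                      :  *              __dev_mc_sync(dev, my_add_addr, my_del_addr);
                      :  *      }
                      :  */
                      : 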
    4591             : /* Functions used for secondary unicast and multicast support */
    4592             : void dev_set_rx_mode(struct net_device *dev);
    4593             : void __dev_set_rx_mode(struct net_device *dev);
    4594             : int dev_set_promiscuity(struct net_device *dev, int inc);
    4595             : int dev_set_allmulti(struct net_device *dev, int inc);
    4596             : void netdev_state_change(struct net_device *dev);
    4597             : void __netdev_notify_peers(struct net_device *dev);
    4598             : void netdev_notify_peers(struct net_device *dev);
    4599             : void netdev_features_change(struct net_device *dev);
    4600             : /* Load a device via the kmod */
    4601             : void dev_load(struct net *net, const char *name);
    4602             : struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
    4603             :                                         struct rtnl_link_stats64 *storage);
    4604             : void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
    4605             :                              const struct net_device_stats *netdev_stats);
    4606             : void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
    4607             :                            const struct pcpu_sw_netstats __percpu *netstats);
    4608             : void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
    4609             : 
    4610             : extern int              netdev_max_backlog;
    4611             : extern int              netdev_tstamp_prequeue;
    4612             : extern int              weight_p;
    4613             : extern int              dev_weight_rx_bias;
    4614             : extern int              dev_weight_tx_bias;
    4615             : extern int              dev_rx_weight;
    4616             : extern int              dev_tx_weight;
    4617             : extern int              gro_normal_batch;
    4618             : 
    4619             : enum {
    4620             :         NESTED_SYNC_IMM_BIT,
    4621             :         NESTED_SYNC_TODO_BIT,
    4622             : };
    4623             : 
    4624             : #define __NESTED_SYNC_BIT(bit)  ((u32)1 << (bit))
    4625             : #define __NESTED_SYNC(name)     __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
    4626             : 
    4627             : #define NESTED_SYNC_IMM         __NESTED_SYNC(IMM)
    4628             : #define NESTED_SYNC_TODO        __NESTED_SYNC(TODO)
    4629             : 
    4630             : struct netdev_nested_priv {
    4631             :         unsigned char flags;
    4632             :         void *data;
    4633             : };
    4634             : 
    4635             : bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
    4636             : struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
    4637             :                                                      struct list_head **iter);
    4638             : struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
    4639             :                                                      struct list_head **iter);
    4640             : 
    4641             : #ifdef CONFIG_LOCKDEP
    4642             : static LIST_HEAD(net_unlink_list);
    4643             : 
    4644           0 : static inline void net_unlink_todo(struct net_device *dev)
    4645             : {
    4646           0 :         if (list_empty(&dev->unlink_list))
    4647           0 :                 list_add_tail(&dev->unlink_list, &net_unlink_list);
    4648             : }
    4649             : #endif
    4650             : 
    4651             : /* Iterate through the upper device list; must be called under the RCU read lock */
    4652             : #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
    4653             :         for (iter = &(dev)->adj_list.upper, \
    4654             :              updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
    4655             :              updev; \
    4656             :              updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
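                     : 
                     : /* Usage sketch (illustrative): the iterator above must run under the
                     :  * RCU read lock; "dev" is any live net_device:
                     :  *
                     :  *      struct net_device *updev;
                     :  *      struct list_head *iter;
                     :  *
                     :  *      rcu_read_lock();
                     :  *      netdev_for_each_upper_dev_rcu(dev, updev, iter)
                     :  *              netdev_info(updev, "stacked on %s\n", dev->name);
                     :  *      rcu_read_unlock();
                     :  */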
    4657             : 
    4658             : int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
    4659             :                                   int (*fn)(struct net_device *upper_dev,
    4660             :                                             struct netdev_nested_priv *priv),
    4661             :                                   struct netdev_nested_priv *priv);
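                     : 
                     : /* Usage sketch (illustrative): the walker visits the whole nested upper
                     :  * tree and stops early if the callback returns non-zero; caller state
                     :  * travels in priv->data. "count_uppers" is a hypothetical callback:
                     :  *
                     :  *      static int count_uppers(struct net_device *upper_dev,
                     :  *                              struct netdev_nested_priv *priv)
                     :  *      {
                     :  *              (*(int *)priv->data)++;
                     :  *              return 0;
                     :  *      }
                     :  *
                     :  *      int n = 0;
                     :  *      struct netdev_nested_priv priv = { .data = &n };
                     :  *
                     :  *      rcu_read_lock();
                     :  *      netdev_walk_all_upper_dev_rcu(dev, count_uppers, &priv);
                     :  *      rcu_read_unlock();
                     :  */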
    4662             : 
    4663             : bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
    4664             :                                   struct net_device *upper_dev);
    4665             : 
    4666             : bool netdev_has_any_upper_dev(struct net_device *dev);
    4667             : 
    4668             : void *netdev_lower_get_next_private(struct net_device *dev,
    4669             :                                     struct list_head **iter);
    4670             : void *netdev_lower_get_next_private_rcu(struct net_device *dev,
    4671             :                                         struct list_head **iter);
    4672             : 
    4673             : #define netdev_for_each_lower_private(dev, priv, iter) \
    4674             :         for (iter = (dev)->adj_list.lower.next, \
    4675             :              priv = netdev_lower_get_next_private(dev, &(iter)); \
    4676             :              priv; \
    4677             :              priv = netdev_lower_get_next_private(dev, &(iter)))
    4678             : 
    4679             : #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
    4680             :         for (iter = &(dev)->adj_list.lower, \
    4681             :              priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
    4682             :              priv; \
    4683             :              priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
    4684             : 
    4685             : void *netdev_lower_get_next(struct net_device *dev,
    4686             :                                 struct list_head **iter);
    4687             : 
    4688             : #define netdev_for_each_lower_dev(dev, ldev, iter) \
    4689             :         for (iter = (dev)->adj_list.lower.next, \
    4690             :              ldev = netdev_lower_get_next(dev, &(iter)); \
    4691             :              ldev; \
    4692             :              ldev = netdev_lower_get_next(dev, &(iter)))
    4693             : 
    4694             : struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
    4695             :                                              struct list_head **iter);
    4696             : int netdev_walk_all_lower_dev(struct net_device *dev,
    4697             :                               int (*fn)(struct net_device *lower_dev,
    4698             :                                         struct netdev_nested_priv *priv),
    4699             :                               struct netdev_nested_priv *priv);
    4700             : int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
    4701             :                                   int (*fn)(struct net_device *lower_dev,
    4702             :                                             struct netdev_nested_priv *priv),
    4703             :                                   struct netdev_nested_priv *priv);
    4704             : 
    4705             : void *netdev_adjacent_get_private(struct list_head *adj_list);
    4706             : void *netdev_lower_get_first_private_rcu(struct net_device *dev);
    4707             : struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
    4708             : struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
    4709             : int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
    4710             :                           struct netlink_ext_ack *extack);
    4711             : int netdev_master_upper_dev_link(struct net_device *dev,
    4712             :                                  struct net_device *upper_dev,
    4713             :                                  void *upper_priv, void *upper_info,
    4714             :                                  struct netlink_ext_ack *extack);
    4715             : void netdev_upper_dev_unlink(struct net_device *dev,
    4716             :                              struct net_device *upper_dev);
    4717             : int netdev_adjacent_change_prepare(struct net_device *old_dev,
    4718             :                                    struct net_device *new_dev,
    4719             :                                    struct net_device *dev,
    4720             :                                    struct netlink_ext_ack *extack);
    4721             : void netdev_adjacent_change_commit(struct net_device *old_dev,
    4722             :                                    struct net_device *new_dev,
    4723             :                                    struct net_device *dev);
    4724             : void netdev_adjacent_change_abort(struct net_device *old_dev,
    4725             :                                   struct net_device *new_dev,
    4726             :                                   struct net_device *dev);
    4727             : void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
    4728             : void *netdev_lower_dev_get_private(struct net_device *dev,
    4729             :                                    struct net_device *lower_dev);
    4730             : void netdev_lower_state_changed(struct net_device *lower_dev,
    4731             :                                 void *lower_state_info);
    4732             : 
    4733             : /* RSS keys are 40 or 52 bytes long */
    4734             : #define NETDEV_RSS_KEY_LEN 52
    4735             : extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
    4736             : void netdev_rss_key_fill(void *buffer, size_t len);
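                     : 
                     : /* Usage sketch (illustrative): a NIC with a 40-byte hash key copies the
                     :  * leading bytes of the system-wide key into its own buffer:
                     :  *
                     :  *      u8 key[40];
                     :  *
                     :  *      netdev_rss_key_fill(key, sizeof(key));
                     :  */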
    4737             : 
    4738             : int skb_checksum_help(struct sk_buff *skb);
    4739             : int skb_crc32c_csum_help(struct sk_buff *skb);
    4740             : int skb_csum_hwoffload_help(struct sk_buff *skb,
    4741             :                             const netdev_features_t features);
    4742             : 
    4743             : struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
    4744             :                                   netdev_features_t features, bool tx_path);
    4745             : struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
    4746             :                                     netdev_features_t features);
    4747             : 
    4748             : struct netdev_bonding_info {
    4749             :         ifslave slave;
    4750             :         ifbond  master;
    4751             : };
    4752             : 
    4753             : struct netdev_notifier_bonding_info {
    4754             :         struct netdev_notifier_info info; /* must be first */
    4755             :         struct netdev_bonding_info  bonding_info;
    4756             : };
    4757             : 
    4758             : void netdev_bonding_info_change(struct net_device *dev,
    4759             :                                 struct netdev_bonding_info *bonding_info);
    4760             : 
    4761             : #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
    4762             : void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
    4763             : #else
    4764           0 : static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
    4765             :                                   const void *data)
    4766             : {
    4767           0 : }
    4768             : #endif
    4769             : 
    4770             : static inline
    4771           0 : struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
    4772             : {
    4773           0 :         return __skb_gso_segment(skb, features, true);
    4774             : }
    4775             : __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
    4776             : 
    4777         430 : static inline bool can_checksum_protocol(netdev_features_t features,
    4778             :                                          __be16 protocol)
    4779             : {
    4780         430 :         if (protocol == htons(ETH_P_FCOE))
    4781           0 :                 return !!(features & NETIF_F_FCOE_CRC);
    4782             : 
    4783             :         /* Assume this is an IP checksum (not SCTP CRC) */
    4784             : 
    4785         430 :         if (features & NETIF_F_HW_CSUM) {
    4786             :                 /* Can checksum everything */
    4787             :                 return true;
    4788             :         }
    4789             : 
    4790         430 :         switch (protocol) {
    4791         430 :         case htons(ETH_P_IP):
    4792         430 :                 return !!(features & NETIF_F_IP_CSUM);
    4793           0 :         case htons(ETH_P_IPV6):
    4794           0 :                 return !!(features & NETIF_F_IPV6_CSUM);
    4795             :         default:
    4796             :                 return false;
    4797             :         }
    4798             : }
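                     : 
                     : /* Worked example (illustrative): a device advertising only
                     :  * NETIF_F_IP_CSUM can checksum IPv4 but not IPv6 packets, whereas
                     :  * NETIF_F_HW_CSUM would cover both:
                     :  *
                     :  *      can_checksum_protocol(NETIF_F_IP_CSUM, htons(ETH_P_IP))   -> true
                     :  *      can_checksum_protocol(NETIF_F_IP_CSUM, htons(ETH_P_IPV6)) -> false
                     :  */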
    4799             : 
    4800             : #ifdef CONFIG_BUG
    4801             : void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
    4802             : #else
    4803             : static inline void netdev_rx_csum_fault(struct net_device *dev,
    4804             :                                         struct sk_buff *skb)
    4805             : {
    4806             : }
    4807             : #endif
    4808             : /* rx skb timestamps */
    4809             : void net_enable_timestamp(void);
    4810             : void net_disable_timestamp(void);
    4811             : 
    4812             : #ifdef CONFIG_PROC_FS
    4813             : int __init dev_proc_init(void);
    4814             : #else
    4815             : #define dev_proc_init() 0
    4816             : #endif
    4817             : 
    4818         448 : static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
    4819             :                                               struct sk_buff *skb, struct net_device *dev,
    4820             :                                               bool more)
    4821             : {
    4822         448 :         __this_cpu_write(softnet_data.xmit.more, more);
    4823         448 :         return ops->ndo_start_xmit(skb, dev);
    4824             : }
    4825             : 
    4826         448 : static inline bool netdev_xmit_more(void)
    4827             : {
    4828         448 :         return __this_cpu_read(softnet_data.xmit.more);
    4829             : }
    4830             : 
    4831         448 : static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
    4832             :                                             struct netdev_queue *txq, bool more)
    4833             : {
    4834         448 :         const struct net_device_ops *ops = dev->netdev_ops;
    4835         448 :         netdev_tx_t rc;
    4836             : 
    4837         448 :         rc = __netdev_start_xmit(ops, skb, dev, more);
    4838         448 :         if (rc == NETDEV_TX_OK)
    4839         448 :                 txq_trans_update(txq);
    4840             : 
    4841         448 :         return rc;
    4842             : }
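                     : 
                     : /* Usage sketch (illustrative): a driver's ndo_start_xmit can consult
                     :  * the per-cpu "more" hint written above to coalesce doorbell writes;
                     :  * the "my_*" helpers are hypothetical:
                     :  *
                     :  *      static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                     :  *                                       struct net_device *dev)
                     :  *      {
                     :  *              my_queue_skb(dev, skb);
                     :  *              if (!netdev_xmit_more())
                     :  *                      my_ring_doorbell(dev);
                     :  *              return NETDEV_TX_OK;
                     :  *      }
                     :  */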
    4843             : 
    4844             : int netdev_class_create_file_ns(const struct class_attribute *class_attr,
    4845             :                                 const void *ns);
    4846             : void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
    4847             :                                  const void *ns);
    4848             : 
    4849             : extern const struct kobj_ns_type_operations net_ns_type_operations;
    4850             : 
    4851             : const char *netdev_drivername(const struct net_device *dev);
    4852             : 
    4853             : void linkwatch_run_queue(void);
    4854             : 
    4855           0 : static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
    4856             :                                                           netdev_features_t f2)
    4857             : {
    4858           0 :         if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
    4859           0 :                 if (f1 & NETIF_F_HW_CSUM)
    4860           0 :                         f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
    4861             :                 else
    4862           0 :                         f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
    4863             :         }
    4864             : 
    4865           0 :         return f1 & f2;
    4866             : }
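                     : 
                     : /* Worked example (illustrative): NETIF_F_HW_CSUM on one side is first
                     :  * widened to the protocol-specific bits so that it intersects correctly
                     :  * with a device that only offers NETIF_F_IP_CSUM:
                     :  *
                     :  *      netdev_intersect_features(NETIF_F_HW_CSUM, NETIF_F_IP_CSUM)
                     :  *              == NETIF_F_IP_CSUM
                     :  */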
    4867             : 
    4868           4 : static inline netdev_features_t netdev_get_wanted_features(
    4869             :         struct net_device *dev)
    4870             : {
    4871           4 :         return (dev->features & ~dev->hw_features) | dev->wanted_features;
    4872             : }
    4873             : netdev_features_t netdev_increment_features(netdev_features_t all,
    4874             :         netdev_features_t one, netdev_features_t mask);
    4875             : 
    4876             : /* Allow TSO to be used on stacked devices:
    4877             :  * performing the GSO segmentation before the last device
    4878             :  * is a performance improvement.
    4879             :  */
    4880             : static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
    4881             :                                                         netdev_features_t mask)
    4882             : {
    4883             :         return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
    4884             : }
    4885             : 
    4886             : int __netdev_update_features(struct net_device *dev);
    4887             : void netdev_update_features(struct net_device *dev);
    4888             : void netdev_change_features(struct net_device *dev);
    4889             : 
    4890             : void netif_stacked_transfer_operstate(const struct net_device *rootdev,
    4891             :                                         struct net_device *dev);
    4892             : 
    4893             : netdev_features_t passthru_features_check(struct sk_buff *skb,
    4894             :                                           struct net_device *dev,
    4895             :                                           netdev_features_t features);
    4896             : netdev_features_t netif_skb_features(struct sk_buff *skb);
    4897             : 
    4898           8 : static inline bool net_gso_ok(netdev_features_t features, int gso_type)
    4899             : {
    4900           8 :         netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
    4901             : 
    4902             :         /* check flags correspondence */
    4903           8 :         BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
    4904           8 :         BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
    4905           8 :         BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
    4906           8 :         BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
    4907           8 :         BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
    4908           8 :         BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
    4909           8 :         BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
    4910           8 :         BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
    4911           8 :         BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
    4912           8 :         BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
    4913           8 :         BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
    4914           8 :         BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
    4915           8 :         BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
    4916           8 :         BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
    4917           8 :         BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
    4918           8 :         BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
    4919           8 :         BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
    4920           8 :         BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
    4921           8 :         BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
    4922             : 
    4923           8 :         return (features & feature) == feature;
    4924             : }
    4925             : 
    4926           0 : static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
    4927             : {
    4928           0 :         return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
    4929           0 :                (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
    4930             : }
    4931             : 
    4932         448 : static inline bool netif_needs_gso(struct sk_buff *skb,
    4933             :                                    netdev_features_t features)
    4934             : {
    4935         448 :         return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
    4936           0 :                 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
    4937             :                          (skb->ip_summed != CHECKSUM_UNNECESSARY)));
    4938             : }
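                     : 
                     : /* Usage sketch (illustrative): a transmit path falls back to software
                     :  * GSO only when the device cannot take the skb as-is, roughly:
                     :  *
                     :  *      if (netif_needs_gso(skb, features)) {
                     :  *              struct sk_buff *segs = skb_gso_segment(skb, features);
                     :  *
                     :  *              if (IS_ERR(segs))
                     :  *                      goto drop;      // hypothetical error label
                     :  *              // transmit each segment instead of the original skb
                     :  *      }
                     :  */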
    4939             : 
    4940           0 : static inline void netif_set_gso_max_size(struct net_device *dev,
    4941             :                                           unsigned int size)
    4942             : {
    4943           0 :         dev->gso_max_size = size;
    4944           0 : }
    4945             : 
    4946           0 : static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
    4947             :                                         int pulled_hlen, u16 mac_offset,
    4948             :                                         int mac_len)
    4949             : {
    4950           0 :         skb->protocol = protocol;
    4951           0 :         skb->encapsulation = 1;
    4952           0 :         skb_push(skb, pulled_hlen);
    4953           0 :         skb_reset_transport_header(skb);
    4954           0 :         skb->mac_header = mac_offset;
    4955           0 :         skb->network_header = skb->mac_header + mac_len;
    4956           0 :         skb->mac_len = mac_len;
    4957             : }
    4958             : 
    4959             : static inline bool netif_is_macsec(const struct net_device *dev)
    4960             : {
    4961             :         return dev->priv_flags & IFF_MACSEC;
    4962             : }
    4963             : 
    4964           0 : static inline bool netif_is_macvlan(const struct net_device *dev)
    4965             : {
    4966           0 :         return dev->priv_flags & IFF_MACVLAN;
    4967             : }
    4968             : 
    4969             : static inline bool netif_is_macvlan_port(const struct net_device *dev)
    4970             : {
    4971             :         return dev->priv_flags & IFF_MACVLAN_PORT;
    4972             : }
    4973             : 
    4974           0 : static inline bool netif_is_bond_master(const struct net_device *dev)
    4975             : {
    4976           0 :         return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
    4977             : }
    4978             : 
    4979           0 : static inline bool netif_is_bond_slave(const struct net_device *dev)
    4980             : {
    4981           0 :         return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
    4982             : }
    4983             : 
    4984           0 : static inline bool netif_supports_nofcs(struct net_device *dev)
    4985             : {
    4986           0 :         return dev->priv_flags & IFF_SUPP_NOFCS;
    4987             : }
    4988             : 
    4989             : static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
    4990             : {
    4991             :         return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
    4992             : }
    4993             : 
    4994           0 : static inline bool netif_is_l3_master(const struct net_device *dev)
    4995             : {
    4996           0 :         return dev->priv_flags & IFF_L3MDEV_MASTER;
    4997             : }
    4998             : 
    4999           0 : static inline bool netif_is_l3_slave(const struct net_device *dev)
    5000             : {
    5001           0 :         return dev->priv_flags & IFF_L3MDEV_SLAVE;
    5002             : }
    5003             : 
    5004             : static inline bool netif_is_bridge_master(const struct net_device *dev)
    5005             : {
    5006             :         return dev->priv_flags & IFF_EBRIDGE;
    5007             : }
    5008             : 
    5009           0 : static inline bool netif_is_bridge_port(const struct net_device *dev)
    5010             : {
    5011           0 :         return dev->priv_flags & IFF_BRIDGE_PORT;
    5012             : }
    5013             : 
    5014             : static inline bool netif_is_ovs_master(const struct net_device *dev)
    5015             : {
    5016             :         return dev->priv_flags & IFF_OPENVSWITCH;
    5017             : }
    5018             : 
    5019           0 : static inline bool netif_is_ovs_port(const struct net_device *dev)
    5020             : {
    5021           0 :         return dev->priv_flags & IFF_OVS_DATAPATH;
    5022             : }
    5023             : 
    5024           0 : static inline bool netif_is_any_bridge_port(const struct net_device *dev)
    5025             : {
    5026           0 :         return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
    5027             : }
    5028             : 
    5029           0 : static inline bool netif_is_team_master(const struct net_device *dev)
    5030             : {
    5031           0 :         return dev->priv_flags & IFF_TEAM;
    5032             : }
    5033             : 
    5034           0 : static inline bool netif_is_team_port(const struct net_device *dev)
    5035             : {
    5036           0 :         return dev->priv_flags & IFF_TEAM_PORT;
    5037             : }
    5038             : 
    5039           0 : static inline bool netif_is_lag_master(const struct net_device *dev)
    5040             : {
    5041           0 :         return netif_is_bond_master(dev) || netif_is_team_master(dev);
    5042             : }
    5043             : 
    5044           0 : static inline bool netif_is_lag_port(const struct net_device *dev)
    5045             : {
    5046           0 :         return netif_is_bond_slave(dev) || netif_is_team_port(dev);
    5047             : }
    5048             : 
    5049           0 : static inline bool netif_is_rxfh_configured(const struct net_device *dev)
    5050             : {
    5051           0 :         return dev->priv_flags & IFF_RXFH_CONFIGURED;
    5052             : }
    5053             : 
    5054           6 : static inline bool netif_is_failover(const struct net_device *dev)
    5055             : {
    5056           6 :         return dev->priv_flags & IFF_FAILOVER;
    5057             : }
    5058             : 
    5059           2 : static inline bool netif_is_failover_slave(const struct net_device *dev)
    5060             : {
    5061           2 :         return dev->priv_flags & IFF_FAILOVER_SLAVE;
    5062             : }
    5063             : 
    5064             : /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
    5065           2 : static inline void netif_keep_dst(struct net_device *dev)
    5066             : {
    5067           2 :         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
    5068             : }
    5069             : 
    5070             : /* Return true if dev cannot cope with MTU-sized frames that need VLAN tag insertion */
    5071             : static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
    5072             : {
    5073             :         /* TODO: reserve and use an additional IFF bit, if we get more users */
    5074             :         return dev->priv_flags & IFF_MACSEC;
    5075             : }
    5076             : 
    5077             : extern struct pernet_operations __net_initdata loopback_net_ops;
    5078             : 
    5079             : /* Logging, debugging and troubleshooting/diagnostic helpers. */
    5080             : 
    5081             : /* netdev_printk helpers, similar to dev_printk */
    5082             : 
    5083           0 : static inline const char *netdev_name(const struct net_device *dev)
    5084             : {
    5085           0 :         if (!dev->name[0] || strchr(dev->name, '%'))
    5086           0 :                 return "(unnamed net_device)";
    5087             :         return dev->name;
    5088             : }
    5089             : 
    5090             : static inline bool netdev_unregistering(const struct net_device *dev)
    5091             : {
    5092             :         return dev->reg_state == NETREG_UNREGISTERING;
    5093             : }
    5094             : 
    5095           0 : static inline const char *netdev_reg_state(const struct net_device *dev)
    5096             : {
    5097           0 :         switch (dev->reg_state) {
    5098             :         case NETREG_UNINITIALIZED: return " (uninitialized)";
    5099             :         case NETREG_REGISTERED: return "";
    5100             :         case NETREG_UNREGISTERING: return " (unregistering)";
    5101             :         case NETREG_UNREGISTERED: return " (unregistered)";
    5102             :         case NETREG_RELEASED: return " (released)";
    5103             :         case NETREG_DUMMY: return " (dummy)";
    5104             :         }
    5105             : 
    5106           0 :         WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
    5107             :         return " (unknown)";
    5108             : }
    5109             : 
    5110             : __printf(3, 4) __cold
    5111             : void netdev_printk(const char *level, const struct net_device *dev,
    5112             :                    const char *format, ...);
    5113             : __printf(2, 3) __cold
    5114             : void netdev_emerg(const struct net_device *dev, const char *format, ...);
    5115             : __printf(2, 3) __cold
    5116             : void netdev_alert(const struct net_device *dev, const char *format, ...);
    5117             : __printf(2, 3) __cold
    5118             : void netdev_crit(const struct net_device *dev, const char *format, ...);
    5119             : __printf(2, 3) __cold
    5120             : void netdev_err(const struct net_device *dev, const char *format, ...);
    5121             : __printf(2, 3) __cold
    5122             : void netdev_warn(const struct net_device *dev, const char *format, ...);
    5123             : __printf(2, 3) __cold
    5124             : void netdev_notice(const struct net_device *dev, const char *format, ...);
    5125             : __printf(2, 3) __cold
    5126             : void netdev_info(const struct net_device *dev, const char *format, ...);
    5127             : 
    5128             : #define netdev_level_once(level, dev, fmt, ...)                 \
    5129             : do {                                                            \
    5130             :         static bool __print_once __read_mostly;                 \
    5131             :                                                                 \
    5132             :         if (!__print_once) {                                    \
    5133             :                 __print_once = true;                            \
    5134             :                 netdev_printk(level, dev, fmt, ##__VA_ARGS__);  \
    5135             :         }                                                       \
    5136             : } while (0)
    5137             : 
    5138             : #define netdev_emerg_once(dev, fmt, ...) \
    5139             :         netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
    5140             : #define netdev_alert_once(dev, fmt, ...) \
    5141             :         netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
    5142             : #define netdev_crit_once(dev, fmt, ...) \
    5143             :         netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
    5144             : #define netdev_err_once(dev, fmt, ...) \
    5145             :         netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
    5146             : #define netdev_warn_once(dev, fmt, ...) \
    5147             :         netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
    5148             : #define netdev_notice_once(dev, fmt, ...) \
    5149             :         netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
    5150             : #define netdev_info_once(dev, fmt, ...) \
    5151             :         netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
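                     : 
                     : /* Usage sketch (illustrative): the _once variants log at most one time
                     :  * per boot, e.g. to flag a deprecated knob without flooding the log:
                     :  *
                     :  *      netdev_warn_once(dev, "feature X is deprecated\n");
                     :  */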
    5152             : 
    5153             : #define MODULE_ALIAS_NETDEV(device) \
    5154             :         MODULE_ALIAS("netdev-" device)
    5155             : 
    5156             : #if defined(CONFIG_DYNAMIC_DEBUG) || \
    5157             :         (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
    5158             : #define netdev_dbg(__dev, format, args...)                      \
    5159             : do {                                                            \
    5160             :         dynamic_netdev_dbg(__dev, format, ##args);              \
    5161             : } while (0)
    5162             : #elif defined(DEBUG)
    5163             : #define netdev_dbg(__dev, format, args...)                      \
    5164             :         netdev_printk(KERN_DEBUG, __dev, format, ##args)
    5165             : #else
    5166             : #define netdev_dbg(__dev, format, args...)                      \
    5167             : ({                                                              \
    5168             :         if (0)                                                  \
    5169             :                 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
    5170             : })
    5171             : #endif
    5172             : 
    5173             : #if defined(VERBOSE_DEBUG)
    5174             : #define netdev_vdbg     netdev_dbg
    5175             : #else
    5176             : 
    5177             : #define netdev_vdbg(dev, format, args...)                       \
    5178             : ({                                                              \
    5179             :         if (0)                                                  \
    5180             :                 netdev_printk(KERN_DEBUG, dev, format, ##args); \
    5181             :         0;                                                      \
    5182             : })
    5183             : #endif
    5184             : 
    5185             : /*
    5186             :  * netdev_WARN() acts like dev_printk(), but with the key difference
    5187             :  * of using a WARN/WARN_ON to get the message out, including the
    5188             :  * file/line information and a backtrace.
    5189             :  */
    5190             : #define netdev_WARN(dev, format, args...)                       \
    5191             :         WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
    5192             :              netdev_reg_state(dev), ##args)
    5193             : 
    5194             : #define netdev_WARN_ONCE(dev, format, args...)                          \
    5195             :         WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),    \
    5196             :                   netdev_reg_state(dev), ##args)
    5197             : 
    5198             : /* netif printk helpers, similar to netdev_printk */
    5199             : 
    5200             : #define netif_printk(priv, type, level, dev, fmt, args...)      \
    5201             : do {                                                            \
    5202             :         if (netif_msg_##type(priv))                             \
    5203             :                 netdev_printk(level, (dev), fmt, ##args);       \
    5204             : } while (0)
    5205             : 
    5206             : #define netif_level(level, priv, type, dev, fmt, args...)       \
    5207             : do {                                                            \
    5208             :         if (netif_msg_##type(priv))                             \
    5209             :                 netdev_##level(dev, fmt, ##args);               \
    5210             : } while (0)
    5211             : 
    5212             : #define netif_emerg(priv, type, dev, fmt, args...)              \
    5213             :         netif_level(emerg, priv, type, dev, fmt, ##args)
    5214             : #define netif_alert(priv, type, dev, fmt, args...)              \
    5215             :         netif_level(alert, priv, type, dev, fmt, ##args)
    5216             : #define netif_crit(priv, type, dev, fmt, args...)               \
    5217             :         netif_level(crit, priv, type, dev, fmt, ##args)
    5218             : #define netif_err(priv, type, dev, fmt, args...)                \
    5219             :         netif_level(err, priv, type, dev, fmt, ##args)
    5220             : #define netif_warn(priv, type, dev, fmt, args...)               \
    5221             :         netif_level(warn, priv, type, dev, fmt, ##args)
    5222             : #define netif_notice(priv, type, dev, fmt, args...)             \
    5223             :         netif_level(notice, priv, type, dev, fmt, ##args)
    5224             : #define netif_info(priv, type, dev, fmt, args...)               \
    5225             :         netif_level(info, priv, type, dev, fmt, ##args)
    5226             : 
    5227             : #if defined(CONFIG_DYNAMIC_DEBUG) || \
    5228             :         (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
    5229             : #define netif_dbg(priv, type, netdev, format, args...)          \
    5230             : do {                                                            \
    5231             :         if (netif_msg_##type(priv))                             \
    5232             :                 dynamic_netdev_dbg(netdev, format, ##args);     \
    5233             : } while (0)
    5234             : #elif defined(DEBUG)
    5235             : #define netif_dbg(priv, type, dev, format, args...)             \
    5236             :         netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
    5237             : #else
    5238             : #define netif_dbg(priv, type, dev, format, args...)                     \
    5239             : ({                                                                      \
    5240             :         if (0)                                                          \
    5241             :                 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
    5242             :         0;                                                              \
    5243             : })
    5244             : #endif
    5245             : 
    5246             : /* if @cond then downgrade to debug, else print at @level */
    5247             : #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
    5248             :         do {                                                              \
    5249             :                 if (cond)                                                 \
    5250             :                         netif_dbg(priv, type, netdev, fmt, ##args);       \
    5251             :                 else                                                      \
    5252             :                         netif_ ## level(priv, type, netdev, fmt, ##args); \
    5253             :         } while (0)
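                     : 
                     : /* Usage sketch (illustrative): report an expected -EOPNOTSUPP quietly
                     :  * while logging anything else as an error; "ret" is a hypothetical
                     :  * status from a preceding call and "priv" the driver private struct:
                     :  *
                     :  *      netif_cond_dbg(priv, probe, dev, ret == -EOPNOTSUPP, err,
                     :  *                     "request failed: %d\n", ret);
                     :  */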
    5254             : 
    5255             : #if defined(VERBOSE_DEBUG)
    5256             : #define netif_vdbg      netif_dbg
    5257             : #else
    5258             : #define netif_vdbg(priv, type, dev, format, args...)            \
    5259             : ({                                                              \
    5260             :         if (0)                                                  \
    5261             :                 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
    5262             :         0;                                                      \
    5263             : })
    5264             : #endif
    5265             : 
    5266             : /*
    5267             :  *      The list of packet types we will receive (as opposed to discard)
    5268             :  *      and the routines to invoke.
    5269             :  *
    5270             :  *      Why 16? Because with 16 buckets, the only collision on a hash of
    5271             :  *      the low nibble of the protocol value is RARP/SNAP/X.25.
    5272             :  *
    5273             :  *              0800    IP
    5274             :  *              0001    802.3
    5275             :  *              0002    AX.25
    5276             :  *              0004    802.2
    5277             :  *              8035    RARP
    5278             :  *              0005    SNAP
    5279             :  *              0805    X.25
    5280             :  *              0806    ARP
    5281             :  *              8137    IPX
    5282             :  *              0009    Localtalk
    5283             :  *              86DD    IPv6
    5284             :  */
    5285             : #define PTYPE_HASH_SIZE (16)
    5286             : #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
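                     : 
                     : /* Illustrative: the core indexes its packet-type table by masking the
                     :  * low nibble of the protocol value, roughly:
                     :  *
                     :  *      hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                     :  *
                     :  * e.g. ETH_P_IP (0x0800) hashes to bucket 0.
                     :  */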
    5287             : 
    5288             : extern struct net_device *blackhole_netdev;
    5289             : 
    5290             : #endif  /* _LINUX_NETDEVICE_H */

Generated by: LCOV version 1.14