LCOV - code coverage report
Current view: top level - net/core - xdp.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                     Hit    Total    Coverage
        Lines:        24      248       9.7 %
        Functions:     2       23       8.7 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /* net/core/xdp.c
       3             :  *
       4             :  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
       5             :  */
       6             : #include <linux/bpf.h>
       7             : #include <linux/filter.h>
       8             : #include <linux/types.h>
       9             : #include <linux/mm.h>
      10             : #include <linux/netdevice.h>
      11             : #include <linux/slab.h>
      12             : #include <linux/idr.h>
      13             : #include <linux/rhashtable.h>
      14             : #include <linux/bug.h>
      15             : #include <net/page_pool.h>
      16             : 
      17             : #include <net/xdp.h>
      18             : #include <net/xdp_priv.h> /* struct xdp_mem_allocator */
      19             : #include <trace/events/xdp.h>
      20             : #include <net/xdp_sock_drv.h>
      21             : 
      22             : #define REG_STATE_NEW           0x0
      23             : #define REG_STATE_REGISTERED    0x1
      24             : #define REG_STATE_UNREGISTERED  0x2
      25             : #define REG_STATE_UNUSED        0x3
      26             : 
      27             : static DEFINE_IDA(mem_id_pool);
      28             : static DEFINE_MUTEX(mem_id_lock);
      29             : #define MEM_ID_MAX 0xFFFE
      30             : #define MEM_ID_MIN 1
      31             : static int mem_id_next = MEM_ID_MIN;
      32             : 
      33             : static bool mem_id_init; /* false */
      34             : static struct rhashtable *mem_id_ht;
      35             : 
      36           0 : static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
      37             : {
      38           0 :         const u32 *k = data;
      39           0 :         const u32 key = *k;
      40             : 
      41           0 :         BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
      42             :                      != sizeof(u32));
      43             : 
      44             :         /* Use the cyclically increasing ID directly as the hash key */
      45           0 :         return key;
      46             : }
      47             : 
      48           0 : static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
      49             :                           const void *ptr)
      50             : {
      51           0 :         const struct xdp_mem_allocator *xa = ptr;
      52           0 :         u32 mem_id = *(u32 *)arg->key;
      53             : 
      54           0 :         return xa->mem.id != mem_id;
      55             : }
      56             : 
      57             : static const struct rhashtable_params mem_id_rht_params = {
      58             :         .nelem_hint = 64,
      59             :         .head_offset = offsetof(struct xdp_mem_allocator, node),
      60             :         .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
      61             :         .key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
      62             :         .max_size = MEM_ID_MAX,
      63             :         .min_size = 8,
      64             :         .automatic_shrinking = true,
      65             :         .hashfn    = xdp_mem_id_hashfn,
      66             :         .obj_cmpfn = xdp_mem_id_cmp,
      67             : };
      68             : 
      69             : static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
      70             : {
      71             :         struct xdp_mem_allocator *xa;
      72             : 
      73             :         xa = container_of(rcu, struct xdp_mem_allocator, rcu);
      74             : 
      75             :         /* Allow this ID to be reused */
      76             :         ida_simple_remove(&mem_id_pool, xa->mem.id);
      77             : 
      78             :         kfree(xa);
      79             : }
      80             : 
      81             : static void mem_xa_remove(struct xdp_mem_allocator *xa)
      82             : {
      83             :         trace_mem_disconnect(xa);
      84             : 
      85             :         if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
      86             :                 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
      87             : }
      88             : 
      89             : static void mem_allocator_disconnect(void *allocator)
      90             : {
      91             :         struct xdp_mem_allocator *xa;
      92             :         struct rhashtable_iter iter;
      93             : 
      94             :         mutex_lock(&mem_id_lock);
      95             : 
      96             :         rhashtable_walk_enter(mem_id_ht, &iter);
      97             :         do {
      98             :                 rhashtable_walk_start(&iter);
      99             : 
     100             :                 while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
     101             :                         if (xa->allocator == allocator)
     102             :                                 mem_xa_remove(xa);
     103             :                 }
     104             : 
     105             :                 rhashtable_walk_stop(&iter);
     106             : 
     107             :         } while (xa == ERR_PTR(-EAGAIN));
     108             :         rhashtable_walk_exit(&iter);
     109             : 
     110             :         mutex_unlock(&mem_id_lock);
     111             : }
     112             : 
     113           0 : void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
     114             : {
     115           0 :         struct xdp_mem_allocator *xa;
     116           0 :         int id = xdp_rxq->mem.id;
     117             : 
     118           0 :         if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
     119           0 :                 WARN(1, "Missing register, driver bug");
     120           0 :                 return;
     121             :         }
     122             : 
     123           0 :         if (id == 0)
     124             :                 return;
     125             : 
     126           0 :         if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
     127           0 :                 rcu_read_lock();
     128           0 :                 xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
     129           0 :                 page_pool_destroy(xa->page_pool);
     130           0 :                 rcu_read_unlock();
     131             :         }
     132             : }
     133             : EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
     134             : 
     135           0 : void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
     136             : {
     137             :         /* Simplify driver cleanup code paths, allow unreg "unused" */
     138           0 :         if (xdp_rxq->reg_state == REG_STATE_UNUSED)
     139             :                 return;
     140             : 
      141           0 :         WARN(xdp_rxq->reg_state != REG_STATE_REGISTERED, "Driver BUG");
     142             : 
     143           0 :         xdp_rxq_info_unreg_mem_model(xdp_rxq);
     144             : 
     145           0 :         xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
     146           0 :         xdp_rxq->dev = NULL;
     147             : 
     148             :         /* Reset mem info to defaults */
     149           0 :         xdp_rxq->mem.id = 0;
     150           0 :         xdp_rxq->mem.type = 0;
     151             : }
     152             : EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
     153             : 
     154           4 : static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
     155             : {
     156           4 :         memset(xdp_rxq, 0, sizeof(*xdp_rxq));
     157             : }
     158             : 
     159             : /* Returns 0 on success, negative on failure */
     160           4 : int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
     161             :                      struct net_device *dev, u32 queue_index, unsigned int napi_id)
     162             : {
     163           4 :         if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
     164           0 :                 WARN(1, "Driver promised not to register this");
     165           0 :                 return -EINVAL;
     166             :         }
     167             : 
     168           4 :         if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
     169           0 :                 WARN(1, "Missing unregister, handled but fix driver");
     170           0 :                 xdp_rxq_info_unreg(xdp_rxq);
     171             :         }
     172             : 
     173           4 :         if (!dev) {
     174           0 :                 WARN(1, "Missing net_device from driver");
     175           0 :                 return -ENODEV;
     176             :         }
     177             : 
     178             :         /* State either UNREGISTERED or NEW */
     179           4 :         xdp_rxq_info_init(xdp_rxq);
     180           4 :         xdp_rxq->dev = dev;
     181           4 :         xdp_rxq->queue_index = queue_index;
     182           4 :         xdp_rxq->napi_id = napi_id;
     183             : 
     184           4 :         xdp_rxq->reg_state = REG_STATE_REGISTERED;
     185           4 :         return 0;
     186             : }
     187             : EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
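
For orientation, a minimal sketch of the driver-side registration this API
expects; struct my_rx_ring and the function names are hypothetical, not part
of this file:

        struct my_rx_ring {
                struct xdp_rxq_info xdp_rxq;    /* embedded, as drivers do */
                /* ... ring buffers, NAPI context, etc. ... */
        };

        /* Called from ndo_open, before an XDP program can attach */
        static int my_rx_ring_setup(struct my_rx_ring *ring,
                                    struct net_device *dev, u32 qid,
                                    unsigned int napi_id)
        {
                return xdp_rxq_info_reg(&ring->xdp_rxq, dev, qid, napi_id);
        }
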
     188             : 
     189           0 : void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
     190             : {
     191           0 :         xdp_rxq->reg_state = REG_STATE_UNUSED;
     192           0 : }
     193             : EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
     194             : 
     195           0 : bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
     196             : {
     197           0 :         return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
     198             : }
     199             : EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
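
The matching teardown for the registration sketch above (again hypothetical
driver code); xdp_rxq_info_unreg() also unwinds any registered memory model:

        static void my_rx_ring_teardown(struct my_rx_ring *ring)
        {
                if (xdp_rxq_info_is_reg(&ring->xdp_rxq))
                        xdp_rxq_info_unreg(&ring->xdp_rxq);
        }
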
     200             : 
     201           0 : static int __mem_id_init_hash_table(void)
     202             : {
     203           0 :         struct rhashtable *rht;
     204           0 :         int ret;
     205             : 
     206           0 :         if (unlikely(mem_id_init))
     207             :                 return 0;
     208             : 
     209           0 :         rht = kzalloc(sizeof(*rht), GFP_KERNEL);
     210           0 :         if (!rht)
     211             :                 return -ENOMEM;
     212             : 
     213           0 :         ret = rhashtable_init(rht, &mem_id_rht_params);
     214           0 :         if (ret < 0) {
     215           0 :                 kfree(rht);
     216           0 :                 return ret;
     217             :         }
     218           0 :         mem_id_ht = rht;
     219           0 :         smp_mb(); /* mutex lock should provide enough pairing */
     220           0 :         mem_id_init = true;
     221             : 
     222           0 :         return 0;
     223             : }
     224             : 
      225             : /* Allocate a cyclic ID that maps to an allocator pointer.
     226             :  * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
     227             :  *
     228             :  * Caller must lock mem_id_lock.
     229             :  */
     230           0 : static int __mem_id_cyclic_get(gfp_t gfp)
     231             : {
     232           0 :         int retries = 1;
     233           0 :         int id;
     234             : 
     235           0 : again:
     236           0 :         id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
     237           0 :         if (id < 0) {
     238           0 :                 if (id == -ENOSPC) {
     239             :                         /* Cyclic allocator, reset next id */
     240           0 :                         if (retries--) {
     241           0 :                                 mem_id_next = MEM_ID_MIN;
     242           0 :                                 goto again;
     243             :                         }
     244             :                 }
     245           0 :                 return id; /* errno */
     246             :         }
     247           0 :         mem_id_next = id + 1;
     248             : 
     249           0 :         return id;
     250             : }
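
A worked example of the single retry, with illustrative numbers: if
mem_id_next is 0xFFFD and ID 0xFFFD is still in use, the first
ida_simple_get() finds nothing in [0xFFFD, 0xFFFE) and returns -ENOSPC;
the retry rescans from MEM_ID_MIN and returns the lowest free ID, say 5,
after which mem_id_next becomes 6. A second -ENOSPC (retries exhausted)
means the whole 1..0xFFFD space is genuinely full, and the error is
propagated to the caller.
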
     251             : 
     252           1 : static bool __is_supported_mem_type(enum xdp_mem_type type)
     253             : {
     254           1 :         if (type == MEM_TYPE_PAGE_POOL)
     255             :                 return is_page_pool_compiled_in();
     256             : 
     257           1 :         if (type >= MEM_TYPE_MAX)
     258             :                 return false;
     259             : 
     260             :         return true;
     261             : }
     262             : 
     263           1 : int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
     264             :                                enum xdp_mem_type type, void *allocator)
     265             : {
     266           1 :         struct xdp_mem_allocator *xdp_alloc;
     267           1 :         gfp_t gfp = GFP_KERNEL;
     268           1 :         int id, errno, ret;
     269           1 :         void *ptr;
     270             : 
     271           1 :         if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
     272           0 :                 WARN(1, "Missing register, driver bug");
     273           0 :                 return -EFAULT;
     274             :         }
     275             : 
     276           1 :         if (!__is_supported_mem_type(type))
     277             :                 return -EOPNOTSUPP;
     278             : 
     279           1 :         xdp_rxq->mem.type = type;
     280             : 
     281           1 :         if (!allocator) {
     282             :                 if (type == MEM_TYPE_PAGE_POOL)
     283             :                         return -EINVAL; /* Setup time check page_pool req */
     284             :                 return 0;
     285             :         }
     286             : 
     287             :         /* Delay init of rhashtable to save memory if feature isn't used */
     288           0 :         if (!mem_id_init) {
     289           0 :                 mutex_lock(&mem_id_lock);
     290           0 :                 ret = __mem_id_init_hash_table();
     291           0 :                 mutex_unlock(&mem_id_lock);
     292           0 :                 if (ret < 0) {
     293           0 :                         WARN_ON(1);
     294           0 :                         return ret;
     295             :                 }
     296             :         }
     297             : 
     298           0 :         xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
     299           0 :         if (!xdp_alloc)
     300             :                 return -ENOMEM;
     301             : 
     302           0 :         mutex_lock(&mem_id_lock);
     303           0 :         id = __mem_id_cyclic_get(gfp);
     304           0 :         if (id < 0) {
     305           0 :                 errno = id;
     306           0 :                 goto err;
     307             :         }
     308           0 :         xdp_rxq->mem.id = id;
     309           0 :         xdp_alloc->mem  = xdp_rxq->mem;
     310           0 :         xdp_alloc->allocator = allocator;
     311             : 
     312             :         /* Insert allocator into ID lookup table */
     313           0 :         ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
     314           0 :         if (IS_ERR(ptr)) {
     315           0 :                 ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
     316           0 :                 xdp_rxq->mem.id = 0;
     317           0 :                 errno = PTR_ERR(ptr);
     318           0 :                 goto err;
     319             :         }
     320             : 
     321           0 :         if (type == MEM_TYPE_PAGE_POOL)
     322           0 :                 page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
     323             : 
     324           0 :         mutex_unlock(&mem_id_lock);
     325             : 
     326           0 :         trace_mem_connect(xdp_alloc, xdp_rxq);
     327           0 :         return 0;
     328           0 : err:
     329           0 :         mutex_unlock(&mem_id_lock);
     330           0 :         kfree(xdp_alloc);
     331           0 :         return errno;
     332             : }
     333             : EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
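
A sketch of the page_pool path through this function, continuing the
hypothetical driver above; page_pool_create() and struct page_pool_params
are the real page_pool API, while the surrounding error handling is just
one plausible arrangement:

        struct page_pool_params pp_params = {
                .order     = 0,
                .pool_size = 256,
                .nid       = NUMA_NO_NODE,
                .dev       = dev->dev.parent,   /* device doing the DMA */
        };
        struct page_pool *pool;
        int err;

        pool = page_pool_create(&pp_params);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        /* Ties the pool to rxq->mem.id via mem_id_ht above */
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                         MEM_TYPE_PAGE_POOL, pool);
        if (err) {
                page_pool_destroy(pool);
                return err;
        }
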
     334             : 
     335             : /* XDP RX runs under NAPI protection, and in different delivery error
     336             :  * scenarios (e.g. queue full), it is possible to return the xdp_frame
     337             :  * while still leveraging this protection.  The @napi_direct boolean
      338             :  * is used for those call sites, allowing faster recycling
     339             :  * of xdp_frames/pages in those cases.
     340             :  */
     341           0 : static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
     342             :                          struct xdp_buff *xdp)
     343             : {
     344           0 :         struct xdp_mem_allocator *xa;
     345           0 :         struct page *page;
     346             : 
     347           0 :         switch (mem->type) {
     348             :         case MEM_TYPE_PAGE_POOL:
     349           0 :                 rcu_read_lock();
     350             :                 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
     351           0 :                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
     352           0 :                 page = virt_to_head_page(data);
     353           0 :                 napi_direct &= !xdp_return_frame_no_direct();
     354           0 :                 page_pool_put_full_page(xa->page_pool, page, napi_direct);
     355           0 :                 rcu_read_unlock();
     356           0 :                 break;
     357           0 :         case MEM_TYPE_PAGE_SHARED:
     358           0 :                 page_frag_free(data);
     359           0 :                 break;
     360           0 :         case MEM_TYPE_PAGE_ORDER0:
      361           0 :         page = virt_to_page(data); /* Assumes order-0 page */
     362           0 :                 put_page(page);
     363           0 :                 break;
     364             :         case MEM_TYPE_XSK_BUFF_POOL:
     365             :                 /* NB! Only valid from an xdp_buff! */
     366           0 :                 xsk_buff_free(xdp);
     367             :                 break;
     368             :         default:
     369             :                 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
     370           0 :                 WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
     371           0 :                 break;
     372             :         }
     373           0 : }
     374             : 
     375           0 : void xdp_return_frame(struct xdp_frame *xdpf)
     376             : {
     377           0 :         __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
     378           0 : }
     379             : EXPORT_SYMBOL_GPL(xdp_return_frame);
     380             : 
     381           0 : void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
     382             : {
     383           0 :         __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
     384           0 : }
     385             : EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
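
A short sketch of how the two wrappers map onto the @napi_direct
distinction explained above; the calling contexts are assumed:

        /* Softirq/NAPI context, e.g. XDP_TX completion inside the
         * driver's poll routine: direct page_pool recycling is safe.
         */
        xdp_return_frame_rx_napi(xdpf);

        /* Any context where NAPI protection is not guaranteed, e.g. a
         * queue teardown path or a work item: take the safe path.
         */
        xdp_return_frame(xdpf);
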
     386             : 
     387             : /* XDP bulk APIs introduce a defer/flush mechanism to return
     388             :  * pages belonging to the same xdp_mem_allocator object
     389             :  * (identified via the mem.id field) in bulk to optimize
     390             :  * I-cache and D-cache.
      391             :  * The bulk queue size is set to 16 to match how
     392             :  * XDP_REDIRECT bulking works. The bulk is flushed when
     393             :  * it is full or when mem.id changes.
     394             :  * xdp_frame_bulk is usually stored/allocated on the function
     395             :  * call-stack to avoid locking penalties.
     396             :  */
     397           0 : void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
     398             : {
     399           0 :         struct xdp_mem_allocator *xa = bq->xa;
     400             : 
     401           0 :         if (unlikely(!xa || !bq->count))
     402             :                 return;
     403             : 
     404           0 :         page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
      405             :         /* bq->xa is not cleared, saving a lookup if the next bulk has the same mem.id */
     406           0 :         bq->count = 0;
     407             : }
     408             : EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
     409             : 
     410             : /* Must be called with rcu_read_lock held */
     411           0 : void xdp_return_frame_bulk(struct xdp_frame *xdpf,
     412             :                            struct xdp_frame_bulk *bq)
     413             : {
     414           0 :         struct xdp_mem_info *mem = &xdpf->mem;
     415           0 :         struct xdp_mem_allocator *xa;
     416             : 
     417           0 :         if (mem->type != MEM_TYPE_PAGE_POOL) {
     418           0 :                 __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
     419           0 :                 return;
     420             :         }
     421             : 
     422           0 :         xa = bq->xa;
     423           0 :         if (unlikely(!xa)) {
     424           0 :                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
     425           0 :                 bq->count = 0;
     426           0 :                 bq->xa = xa;
     427             :         }
     428             : 
     429           0 :         if (bq->count == XDP_BULK_QUEUE_SIZE)
     430           0 :                 xdp_flush_frame_bulk(bq);
     431             : 
     432           0 :         if (unlikely(mem->id != xa->mem.id)) {
     433           0 :                 xdp_flush_frame_bulk(bq);
     434           0 :                 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
     435             :         }
     436             : 
     437           0 :         bq->q[bq->count++] = xdpf->data;
     438             : }
     439             : EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
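
A usage sketch of the defer/flush pattern described above, assuming a
caller-owned frames[] array of n entries. xdp_frame_bulk_init() is the
matching initializer from include/net/xdp.h; it only clears bq->xa, since
bq->count is zeroed on first use as seen above:

        struct xdp_frame_bulk bq;
        int i;

        xdp_frame_bulk_init(&bq);

        rcu_read_lock();                /* required by xdp_return_frame_bulk() */
        for (i = 0; i < n; i++)
                xdp_return_frame_bulk(frames[i], &bq);
        xdp_flush_frame_bulk(&bq);      /* flush any partially filled bulk */
        rcu_read_unlock();
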
     440             : 
     441           0 : void xdp_return_buff(struct xdp_buff *xdp)
     442             : {
     443           0 :         __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
     444           0 : }
     445             : 
      446             : /* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
     447           0 : void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
     448             : {
     449           0 :         struct xdp_mem_allocator *xa;
     450           0 :         struct page *page;
     451             : 
     452           0 :         rcu_read_lock();
     453           0 :         xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
     454           0 :         page = virt_to_head_page(data);
     455           0 :         if (xa)
     456           0 :                 page_pool_release_page(xa->page_pool, page);
     457           0 :         rcu_read_unlock();
     458           0 : }
     459             : EXPORT_SYMBOL_GPL(__xdp_release_frame);
     460             : 
     461           0 : void xdp_attachment_setup(struct xdp_attachment_info *info,
     462             :                           struct netdev_bpf *bpf)
     463             : {
     464           0 :         if (info->prog)
     465           0 :                 bpf_prog_put(info->prog);
     466           0 :         info->prog = bpf->prog;
     467           0 :         info->flags = bpf->flags;
     468           0 : }
     469             : EXPORT_SYMBOL_GPL(xdp_attachment_setup);
     470             : 
     471           0 : struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
     472             : {
     473           0 :         unsigned int metasize, totsize;
     474           0 :         void *addr, *data_to_copy;
     475           0 :         struct xdp_frame *xdpf;
     476           0 :         struct page *page;
     477             : 
     478             :         /* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
     479           0 :         metasize = xdp_data_meta_unsupported(xdp) ? 0 :
     480           0 :                    xdp->data - xdp->data_meta;
     481           0 :         totsize = xdp->data_end - xdp->data + metasize;
     482             : 
     483           0 :         if (sizeof(*xdpf) + totsize > PAGE_SIZE)
     484             :                 return NULL;
     485             : 
     486           0 :         page = dev_alloc_page();
     487           0 :         if (!page)
     488             :                 return NULL;
     489             : 
     490           0 :         addr = page_to_virt(page);
     491           0 :         xdpf = addr;
     492           0 :         memset(xdpf, 0, sizeof(*xdpf));
     493             : 
     494           0 :         addr += sizeof(*xdpf);
     495           0 :         data_to_copy = metasize ? xdp->data_meta : xdp->data;
     496           0 :         memcpy(addr, data_to_copy, totsize);
     497             : 
     498           0 :         xdpf->data = addr + metasize;
     499           0 :         xdpf->len = totsize - metasize;
     500           0 :         xdpf->headroom = 0;
     501           0 :         xdpf->metasize = metasize;
     502           0 :         xdpf->frame_sz = PAGE_SIZE;
     503           0 :         xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
     504             : 
     505           0 :         xsk_buff_free(xdp);
     506           0 :         return xdpf;
     507             : }
     508             : EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
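
For clarity, the page layout the clone above produces, with illustrative
numbers:

        page_to_virt(page) + 0                        : struct xdp_frame (zeroed)
        page_to_virt(page) + sizeof(*xdpf)            : metadata (metasize bytes)
        page_to_virt(page) + sizeof(*xdpf) + metasize : packet data

E.g. metasize = 16 and a 1500-byte payload give totsize = 1516, so
xdpf->data = page_to_virt(page) + sizeof(*xdpf) + 16 and xdpf->len = 1500;
the clone is refused up front if sizeof(*xdpf) + totsize exceeds PAGE_SIZE.
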
     509             : 
     510             : /* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
     511           0 : void xdp_warn(const char *msg, const char *func, const int line)
     512             : {
     513           0 :         WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
      514           0 : }
     515             : EXPORT_SYMBOL_GPL(xdp_warn);
     516             : 
     517           0 : int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
     518             : {
     519           0 :         n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
     520             :                                       n_skb, skbs);
     521           0 :         if (unlikely(!n_skb))
     522           0 :                 return -ENOMEM;
     523             : 
     524             :         return 0;
     525             : }
     526             : EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
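
A sketch of how a consumer (cpumap-style) might pair this with
__xdp_build_skb_from_frame() below. __GFP_ZERO matters here because
build_skb_around() expects a cleared skb; frames[], n (assumed <= 16) and
dev come from the caller's context:

        void *skbs[16];
        int i;

        if (xdp_alloc_skb_bulk(skbs, n, GFP_ATOMIC | __GFP_ZERO) < 0)
                return;         /* nothing was allocated, keep the frames */

        for (i = 0; i < n; i++) {
                struct sk_buff *skb;

                skb = __xdp_build_skb_from_frame(frames[i], skbs[i], dev);
                if (!skb) {
                        xdp_return_frame(frames[i]);
                        continue;
                }
                netif_receive_skb(skb);
        }
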
     527             : 
     528           0 : struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
     529             :                                            struct sk_buff *skb,
     530             :                                            struct net_device *dev)
     531             : {
     532           0 :         unsigned int headroom, frame_size;
     533           0 :         void *hard_start;
     534             : 
      535             :         /* Part of the headroom was reserved for xdpf */
     536           0 :         headroom = sizeof(*xdpf) + xdpf->headroom;
     537             : 
      538             :         /* Memory size backing xdp_frame data already has reserved
     539             :          * room for build_skb to place skb_shared_info in tailroom.
     540             :          */
     541           0 :         frame_size = xdpf->frame_sz;
     542             : 
     543           0 :         hard_start = xdpf->data - headroom;
     544           0 :         skb = build_skb_around(skb, hard_start, frame_size);
     545           0 :         if (unlikely(!skb))
     546             :                 return NULL;
     547             : 
     548           0 :         skb_reserve(skb, headroom);
     549           0 :         __skb_put(skb, xdpf->len);
     550           0 :         if (xdpf->metasize)
     551           0 :                 skb_metadata_set(skb, xdpf->metasize);
     552             : 
     553             :         /* Essential SKB info: protocol and skb->dev */
     554           0 :         skb->protocol = eth_type_trans(skb, dev);
     555             : 
     556             :         /* Optional SKB info, currently missing:
     557             :          * - HW checksum info           (skb->ip_summed)
     558             :          * - HW RX hash                 (skb_set_hash)
     559             :          * - RX ring dev queue index    (skb_record_rx_queue)
     560             :          */
     561             : 
      562             :         /* Until page_pool gets an SKB return path, release DMA here */
     563           0 :         xdp_release_frame(xdpf);
     564             : 
     565             :         /* Allow SKB to reuse area used by xdp_frame */
     566           0 :         xdp_scrub_frame(xdpf);
     567             : 
     568           0 :         return skb;
     569             : }
     570             : EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
     571             : 
     572           0 : struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
     573             :                                          struct net_device *dev)
     574             : {
     575           0 :         struct sk_buff *skb;
     576             : 
     577           0 :         skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
     578           0 :         if (unlikely(!skb))
     579             :                 return NULL;
     580             : 
     581           0 :         memset(skb, 0, offsetof(struct sk_buff, tail));
     582             : 
     583           0 :         return __xdp_build_skb_from_frame(xdpf, skb, dev);
     584             : }
     585             : EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
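
And the single-frame variant in use, a minimal sketch with xdpf and dev
assumed from the caller's context:

        struct sk_buff *skb = xdp_build_skb_from_frame(xdpf, dev);

        if (!skb) {
                xdp_return_frame(xdpf);
                return;
        }
        netif_receive_skb(skb);
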

Generated by: LCOV version 1.14