LCOV - code coverage report
Current view: top level - drivers/virtio - virtio_ring.c
Test: landlock.info          Date: 2021-04-22 12:43:58
Coverage:    Lines: 343 / 856 (40.1 %)    Functions: 37 / 60 (61.7 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /* Virtio ring implementation.
       3             :  *
       4             :  *  Copyright 2007 Rusty Russell IBM Corporation
       5             :  */
       6             : #include <linux/virtio.h>
       7             : #include <linux/virtio_ring.h>
       8             : #include <linux/virtio_config.h>
       9             : #include <linux/device.h>
      10             : #include <linux/slab.h>
      11             : #include <linux/module.h>
      12             : #include <linux/hrtimer.h>
      13             : #include <linux/dma-mapping.h>
      14             : #include <xen/xen.h>
      15             : 
      16             : #ifdef DEBUG
      17             : /* For development, we want to crash whenever the ring is screwed. */
      18             : #define BAD_RING(_vq, fmt, args...)                             \
      19             :         do {                                                    \
      20             :                 dev_err(&(_vq)->vq.vdev->dev,                 \
      21             :                         "%s:"fmt, (_vq)->vq.name, ##args); \
      22             :                 BUG();                                          \
      23             :         } while (0)
      24             : /* Caller is supposed to guarantee no reentry. */
      25             : #define START_USE(_vq)                                          \
      26             :         do {                                                    \
      27             :                 if ((_vq)->in_use)                           \
      28             :                         panic("%s:in_use = %i\n",             \
      29             :                               (_vq)->vq.name, (_vq)->in_use);     \
      30             :                 (_vq)->in_use = __LINE__;                    \
      31             :         } while (0)
      32             : #define END_USE(_vq) \
      33             :         do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
      34             : #define LAST_ADD_TIME_UPDATE(_vq)                               \
      35             :         do {                                                    \
      36             :                 ktime_t now = ktime_get();                      \
      37             :                                                                 \
      38             :                 /* No kick or get, with .1 second between?  Warn. */ \
      39             :                 if ((_vq)->last_add_time_valid)                      \
      40             :                         WARN_ON(ktime_to_ms(ktime_sub(now,      \
      41             :                                 (_vq)->last_add_time)) > 100);    \
      42             :                 (_vq)->last_add_time = now;                  \
      43             :                 (_vq)->last_add_time_valid = true;           \
      44             :         } while (0)
      45             : #define LAST_ADD_TIME_CHECK(_vq)                                \
      46             :         do {                                                    \
      47             :                 if ((_vq)->last_add_time_valid) {            \
      48             :                         WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
      49             :                                       (_vq)->last_add_time)) > 100); \
      50             :                 }                                               \
      51             :         } while (0)
      52             : #define LAST_ADD_TIME_INVALID(_vq)                              \
      53             :         ((_vq)->last_add_time_valid = false)
      54             : #else
      55             : #define BAD_RING(_vq, fmt, args...)                             \
      56             :         do {                                                    \
      57             :                 dev_err(&_vq->vq.vdev->dev,                   \
      58             :                         "%s:"fmt, (_vq)->vq.name, ##args); \
      59             :                 (_vq)->broken = true;                                \
      60             :         } while (0)
      61             : #define START_USE(vq)
      62             : #define END_USE(vq)
      63             : #define LAST_ADD_TIME_UPDATE(vq)
      64             : #define LAST_ADD_TIME_CHECK(vq)
      65             : #define LAST_ADD_TIME_INVALID(vq)
      66             : #endif
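
The macros above are the ring's built-in instrumentation: with DEBUG defined, BAD_RING() crashes immediately and START_USE()/END_USE() catch re-entry, while the non-DEBUG variants merely mark the ring broken and compile the checks away. A minimal sketch of the bracketing pattern the entry points below follow (illustrative only; example_op is not part of this file):

        static void example_op(struct vring_virtqueue *vq)
        {
                START_USE(vq);  /* with DEBUG: panics if already in use */
                /* ... manipulate vq->split or vq->packed state ... */
                END_USE(vq);    /* with DEBUG: BUG_ON() if not in use */
        }
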
      67             : 
      68             : struct vring_desc_state_split {
      69             :         void *data;                     /* Data for callback. */
      70             :         struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
      71             : };
      72             : 
      73             : struct vring_desc_state_packed {
      74             :         void *data;                     /* Data for callback. */
      75             :         struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
      76             :         u16 num;                        /* Descriptor list length. */
      77             :         u16 next;                       /* The next desc state in a list. */
      78             :         u16 last;                       /* The last desc state in a list. */
      79             : };
      80             : 
      81             : struct vring_desc_extra_packed {
      82             :         dma_addr_t addr;                /* Buffer DMA addr. */
      83             :         u32 len;                        /* Buffer length. */
      84             :         u16 flags;                      /* Descriptor flags. */
      85             : };
      86             : 
      87             : struct vring_virtqueue {
      88             :         struct virtqueue vq;
      89             : 
      90             :         /* Is this a packed ring? */
      91             :         bool packed_ring;
      92             : 
      93             :         /* Is DMA API used? */
      94             :         bool use_dma_api;
      95             : 
      96             :         /* Can we use weak barriers? */
      97             :         bool weak_barriers;
      98             : 
      99             :         /* Other side has made a mess, don't try any more. */
     100             :         bool broken;
     101             : 
     102             :         /* Host supports indirect buffers */
     103             :         bool indirect;
     104             : 
     105             :         /* Host publishes avail event idx */
     106             :         bool event;
     107             : 
     108             :         /* Head of free buffer list. */
     109             :         unsigned int free_head;
     110             :         /* Number we've added since last sync. */
     111             :         unsigned int num_added;
     112             : 
     113             :         /* Last used index we've seen. */
     114             :         u16 last_used_idx;
     115             : 
     116             :         union {
     117             :                 /* Available for split ring */
     118             :                 struct {
     119             :                         /* Actual memory layout for this queue. */
     120             :                         struct vring vring;
     121             : 
     122             :                         /* Last written value to avail->flags */
     123             :                         u16 avail_flags_shadow;
     124             : 
     125             :                         /*
     126             :                          * Last written value to avail->idx in
     127             :                          * guest byte order.
     128             :                          */
     129             :                         u16 avail_idx_shadow;
     130             : 
     131             :                         /* Per-descriptor state. */
     132             :                         struct vring_desc_state_split *desc_state;
     133             : 
     134             :                         /* DMA address and size information */
     135             :                         dma_addr_t queue_dma_addr;
     136             :                         size_t queue_size_in_bytes;
     137             :                 } split;
     138             : 
     139             :                 /* Available for packed ring */
     140             :                 struct {
     141             :                         /* Actual memory layout for this queue. */
     142             :                         struct {
     143             :                                 unsigned int num;
     144             :                                 struct vring_packed_desc *desc;
     145             :                                 struct vring_packed_desc_event *driver;
     146             :                                 struct vring_packed_desc_event *device;
     147             :                         } vring;
     148             : 
     149             :                         /* Driver ring wrap counter. */
     150             :                         bool avail_wrap_counter;
     151             : 
     152             :                         /* Device ring wrap counter. */
     153             :                         bool used_wrap_counter;
     154             : 
     155             :                         /* Avail used flags. */
     156             :                         u16 avail_used_flags;
     157             : 
     158             :                         /* Index of the next avail descriptor. */
     159             :                         u16 next_avail_idx;
     160             : 
     161             :                         /*
     162             :                          * Last written value to driver->flags in
     163             :                          * guest byte order.
     164             :                          */
     165             :                         u16 event_flags_shadow;
     166             : 
     167             :                         /* Per-descriptor state. */
     168             :                         struct vring_desc_state_packed *desc_state;
     169             :                         struct vring_desc_extra_packed *desc_extra;
     170             : 
     171             :                         /* DMA address and size information */
     172             :                         dma_addr_t ring_dma_addr;
     173             :                         dma_addr_t driver_event_dma_addr;
     174             :                         dma_addr_t device_event_dma_addr;
     175             :                         size_t ring_size_in_bytes;
     176             :                         size_t event_size_in_bytes;
     177             :                 } packed;
     178             :         };
     179             : 
     180             :         /* How to notify other side. FIXME: commonalize hcalls! */
     181             :         bool (*notify)(struct virtqueue *vq);
     182             : 
     183             :         /* DMA, allocation, and size information */
     184             :         bool we_own_ring;
     185             : 
     186             : #ifdef DEBUG
     187             :         /* They're supposed to lock for us. */
     188             :         unsigned int in_use;
     189             : 
     190             :         /* Figure out if their kicks are too delayed. */
     191             :         bool last_add_time_valid;
     192             :         ktime_t last_add_time;
     193             : #endif
     194             : };
     195             : 
     196             : 
     197             : /*
     198             :  * Helpers.
     199             :  */
     200             : 
     201             : #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
     202             : 
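to_vvq() is the usual container_of() idiom: callers hold a pointer to the embedded struct virtqueue and recover the enclosing struct vring_virtqueue from it. A minimal sketch (illustrative only):

        struct vring_virtqueue *vq = to_vvq(_vq);

        /* The embedded member round-trips: &vq->vq is the _vq we started from. */
        BUG_ON(&vq->vq != _vq);
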
     203        5548 : static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
     204             :                                           unsigned int total_sg)
     205             : {
     206        5548 :         struct vring_virtqueue *vq = to_vvq(_vq);
     207             : 
     208             :         /*
     209             :          * If the host supports indirect descriptor tables, and we have multiple
     210             :          * buffers, then go indirect. FIXME: tune this threshold
     211             :          */
     212        3826 :         return (vq->indirect && total_sg > 1 && vq->vq.num_free);
     213             : }
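
In practice the indirect path is taken by any multi-element submission once VIRTIO_RING_F_INDIRECT_DESC has been negotiated; such a request then occupies a single slot in the ring plus a separately allocated descriptor table. A hedged sketch of a caller (req_hdr, data_buf, data_len, req_status and cookie are hypothetical driver-side objects):

        struct scatterlist hdr, data, status;
        struct scatterlist *sgs[3] = { &hdr, &data, &status };

        sg_init_one(&hdr, &req_hdr, sizeof(req_hdr));           /* driver -> device */
        sg_init_one(&data, data_buf, data_len);                 /* device -> driver */
        sg_init_one(&status, &req_status, sizeof(req_status));  /* device -> driver */

        /* total_sg == 3 > 1, so virtqueue_use_indirect() returns true here. */
        err = virtqueue_add_sgs(vq, sgs, 1, 2, cookie, GFP_ATOMIC);
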
     214             : 
     215             : /*
     216             :  * Modern virtio devices have feature bits to specify whether they need a
     217             :  * quirk and bypass the IOMMU. If not there, just use the DMA API.
     218             :  *
     219             :  * If there, the interaction between virtio and DMA API is messy.
     220             :  *
     221             :  * On most systems with virtio, physical addresses match bus addresses,
     222             :  * and it doesn't particularly matter whether we use the DMA API.
     223             :  *
     224             :  * On some systems, including Xen and any system with a physical device
     225             :  * that speaks virtio behind a physical IOMMU, we must use the DMA API
     226             :  * for virtio DMA to work at all.
     227             :  *
     228             :  * On other systems, including SPARC and PPC64, virtio-pci devices are
     229             :  * enumerated as though they are behind an IOMMU, but the virtio host
     230             :  * ignores the IOMMU, so we must either pretend that the IOMMU isn't
     231             :  * there or somehow map everything as the identity.
     232             :  *
     233             :  * For the time being, we preserve historic behavior and bypass the DMA
     234             :  * API.
     235             :  *
     236             :  * TODO: install a per-device DMA ops structure that does the right thing
     237             :  * taking into account all the above quirks, and use the DMA API
     238             :  * unconditionally on data path.
     239             :  */
     240             : 
     241           9 : static bool vring_use_dma_api(struct virtio_device *vdev)
     242             : {
     243           9 :         if (!virtio_has_dma_quirk(vdev))
     244           0 :                 return true;
     245             : 
     246             :         /* Otherwise, we are left to guess. */
     247             :         /*
      248             :          * In theory, it's possible to have a buggy QEMU-supplied
     249             :          * emulated Q35 IOMMU and Xen enabled at the same time.  On
     250             :          * such a configuration, virtio has never worked and will
     251             :          * not work without an even larger kludge.  Instead, enable
     252             :          * the DMA API if we're a Xen guest, which at least allows
     253             :          * all of the sensible Xen configurations to work correctly.
     254             :          */
     255             :         if (xen_domain())
     256             :                 return true;
     257             : 
     258             :         return false;
     259             : }
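
virtio_has_dma_quirk() (from linux/virtio_config.h) is just the inverted VIRTIO_F_ACCESS_PLATFORM feature check, so the decision above reads: use the DMA API whenever the device declares it is subject to platform access restrictions, or when running as a Xen guest; otherwise fall back to raw physical addresses. A rough reading aid, not a replacement for the function above:

        static bool vring_use_dma_api_sketch(struct virtio_device *vdev)
        {
                return virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM) ||
                       xen_domain();
        }
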
     260             : 
     261           1 : size_t virtio_max_dma_size(struct virtio_device *vdev)
     262             : {
     263           1 :         size_t max_segment_size = SIZE_MAX;
     264             : 
     265           1 :         if (vring_use_dma_api(vdev))
     266           0 :                 max_segment_size = dma_max_mapping_size(&vdev->dev);
     267             : 
     268           1 :         return max_segment_size;
     269             : }
     270             : EXPORT_SYMBOL_GPL(virtio_max_dma_size);
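A hedged usage sketch: a driver that hands large buffers to the device can cap its per-segment size with this helper so that no single scatterlist element exceeds what the DMA layer can map (virtio-blk does something along these lines; q and vdev here are assumed to be the driver's request queue and virtio device):

        u32 max_seg = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);

        blk_queue_max_segment_size(q, max_seg);
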
     271             : 
     272           4 : static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
     273             :                               dma_addr_t *dma_handle, gfp_t flag)
     274             : {
     275           4 :         if (vring_use_dma_api(vdev)) {
     276           0 :                 return dma_alloc_coherent(vdev->dev.parent, size,
     277             :                                           dma_handle, flag);
     278             :         } else {
     279           4 :                 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
     280             : 
     281           4 :                 if (queue) {
     282           4 :                         phys_addr_t phys_addr = virt_to_phys(queue);
     283           4 :                         *dma_handle = (dma_addr_t)phys_addr;
     284             : 
     285             :                         /*
      286             :                          * Sanity check: make sure we didn't truncate
     287             :                          * the address.  The only arches I can find that
     288             :                          * have 64-bit phys_addr_t but 32-bit dma_addr_t
     289             :                          * are certain non-highmem MIPS and x86
     290             :                          * configurations, but these configurations
     291             :                          * should never allocate physical pages above 32
     292             :                          * bits, so this is fine.  Just in case, throw a
     293             :                          * warning and abort if we end up with an
     294             :                          * unrepresentable address.
     295             :                          */
     296           4 :                         if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
     297             :                                 free_pages_exact(queue, PAGE_ALIGN(size));
     298             :                                 return NULL;
     299             :                         }
     300             :                 }
     301           4 :                 return queue;
     302             :         }
     303             : }
     304             : 
     305           0 : static void vring_free_queue(struct virtio_device *vdev, size_t size,
     306             :                              void *queue, dma_addr_t dma_handle)
     307             : {
     308           0 :         if (vring_use_dma_api(vdev))
     309           0 :                 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
     310             :         else
     311           0 :                 free_pages_exact(queue, PAGE_ALIGN(size));
     312           0 : }
     313             : 
     314             : /*
     315             :  * The DMA ops on various arches are rather gnarly right now, and
     316             :  * making all of the arch DMA ops work on the vring device itself
     317             :  * is a mess.  For now, we use the parent device for DMA ops.
     318             :  */
     319           0 : static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
     320             : {
     321           0 :         return vq->vq.vdev->dev.parent;
     322             : }
     323             : 
     324             : /* Map one sg entry. */
     325       19655 : static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
     326             :                                    struct scatterlist *sg,
     327             :                                    enum dma_data_direction direction)
     328             : {
     329       19655 :         if (!vq->use_dma_api)
     330       19655 :                 return (dma_addr_t)sg_phys(sg);
     331             : 
     332             :         /*
     333             :          * We can't use dma_map_sg, because we don't use scatterlists in
     334             :          * the way it expects (we don't guarantee that the scatterlist
     335             :          * will exist for the lifetime of the mapping).
     336             :          */
     337           0 :         return dma_map_page(vring_dma_dev(vq),
     338             :                             sg_page(sg), sg->offset, sg->length,
     339             :                             direction);
     340             : }
     341             : 
     342        3379 : static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
     343             :                                    void *cpu_addr, size_t size,
     344             :                                    enum dma_data_direction direction)
     345             : {
     346        3379 :         if (!vq->use_dma_api)
     347        3379 :                 return (dma_addr_t)virt_to_phys(cpu_addr);
     348             : 
     349           0 :         return dma_map_single(vring_dma_dev(vq),
     350             :                               cpu_addr, size, direction);
     351             : }
     352             : 
     353       23034 : static int vring_mapping_error(const struct vring_virtqueue *vq,
     354             :                                dma_addr_t addr)
     355             : {
     356       23034 :         if (!vq->use_dma_api)
     357             :                 return 0;
     358             : 
     359           0 :         return dma_mapping_error(vring_dma_dev(vq), addr);
     360             : }
     361             : 
     362             : 
     363             : /*
     364             :  * Split ring specific functions - *_split().
     365             :  */
     366             : 
     367       22034 : static void vring_unmap_one_split(const struct vring_virtqueue *vq,
     368             :                                   struct vring_desc *desc)
     369             : {
     370       22034 :         u16 flags;
     371             : 
     372       22034 :         if (!vq->use_dma_api)
     373             :                 return;
     374             : 
     375           0 :         flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
     376             : 
     377           0 :         if (flags & VRING_DESC_F_INDIRECT) {
     378           0 :                 dma_unmap_single(vring_dma_dev(vq),
     379             :                                  virtio64_to_cpu(vq->vq.vdev, desc->addr),
     380             :                                  virtio32_to_cpu(vq->vq.vdev, desc->len),
     381             :                                  (flags & VRING_DESC_F_WRITE) ?
     382             :                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
     383             :         } else {
     384           0 :                 dma_unmap_page(vring_dma_dev(vq),
     385             :                                virtio64_to_cpu(vq->vq.vdev, desc->addr),
     386             :                                virtio32_to_cpu(vq->vq.vdev, desc->len),
     387             :                                (flags & VRING_DESC_F_WRITE) ?
     388             :                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
     389             :         }
     390             : }
     391             : 
     392        3379 : static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
     393             :                                                unsigned int total_sg,
     394             :                                                gfp_t gfp)
     395             : {
     396        3379 :         struct vring_desc *desc;
     397        3379 :         unsigned int i;
     398             : 
     399             :         /*
     400             :          * We require lowmem mappings for the descriptors because
     401             :          * otherwise virt_to_phys will give us bogus addresses in the
     402             :          * virtqueue.
     403             :          */
     404        3379 :         gfp &= ~__GFP_HIGHMEM;
     405             : 
     406        3379 :         desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
     407        3379 :         if (!desc)
     408             :                 return NULL;
     409             : 
     410       20865 :         for (i = 0; i < total_sg; i++)
     411       17486 :                 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
     412             :         return desc;
     413             : }
     414             : 
     415        5548 : static inline int virtqueue_add_split(struct virtqueue *_vq,
     416             :                                       struct scatterlist *sgs[],
     417             :                                       unsigned int total_sg,
     418             :                                       unsigned int out_sgs,
     419             :                                       unsigned int in_sgs,
     420             :                                       void *data,
     421             :                                       void *ctx,
     422             :                                       gfp_t gfp)
     423             : {
     424        5548 :         struct vring_virtqueue *vq = to_vvq(_vq);
     425        5548 :         struct scatterlist *sg;
     426        5548 :         struct vring_desc *desc;
     427        5548 :         unsigned int i, n, avail, descs_used, prev, err_idx;
     428        5548 :         int head;
     429        5548 :         bool indirect;
     430             : 
     431        5548 :         START_USE(vq);
     432             : 
     433        5548 :         BUG_ON(data == NULL);
     434        5548 :         BUG_ON(ctx && vq->indirect);
     435             : 
     436        5548 :         if (unlikely(vq->broken)) {
     437             :                 END_USE(vq);
     438             :                 return -EIO;
     439             :         }
     440             : 
     441        5548 :         LAST_ADD_TIME_UPDATE(vq);
     442             : 
     443        5548 :         BUG_ON(total_sg == 0);
     444             : 
     445        5548 :         head = vq->free_head;
     446             : 
     447       11096 :         if (virtqueue_use_indirect(_vq, total_sg))
     448        3379 :                 desc = alloc_indirect_split(_vq, total_sg, gfp);
     449             :         else {
     450        2169 :                 desc = NULL;
     451        4338 :                 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
     452             :         }
     453             : 
     454        3379 :         if (desc) {
     455             :                 /* Use a single buffer which doesn't continue */
     456             :                 indirect = true;
     457             :                 /* Set up rest to use this indirect table. */
     458             :                 i = 0;
     459             :                 descs_used = 1;
     460             :         } else {
     461        2169 :                 indirect = false;
     462        2169 :                 desc = vq->split.vring.desc;
     463        2169 :                 i = head;
     464        2169 :                 descs_used = total_sg;
     465             :         }
     466             : 
     467        5548 :         if (vq->vq.num_free < descs_used) {
     468           0 :                 pr_debug("Can't add buf len %i - avail = %i\n",
     469             :                          descs_used, vq->vq.num_free);
     470             :                 /* FIXME: for historical reasons, we force a notify here if
     471             :                  * there are outgoing parts to the buffer.  Presumably the
     472             :                  * host should service the ring ASAP. */
     473           0 :                 if (out_sgs)
     474           0 :                         vq->notify(&vq->vq);
     475           0 :                 if (indirect)
     476           0 :                         kfree(desc);
     477           0 :                 END_USE(vq);
     478           0 :                 return -ENOSPC;
     479             :         }
     480             : 
     481        9999 :         for (n = 0; n < out_sgs; n++) {
     482       11245 :                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
     483        6794 :                         dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
     484        6794 :                         if (vring_mapping_error(vq, addr))
     485           0 :                                 goto unmap_release;
     486             : 
     487        6794 :                         desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
     488        6794 :                         desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
     489        6794 :                         desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
     490        6794 :                         prev = i;
     491        6794 :                         i = virtio16_to_cpu(_vq->vdev, desc[i].next);
     492             :                 }
     493             :         }
     494       13212 :         for (; n < (out_sgs + in_sgs); n++) {
     495       20525 :                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
     496       12861 :                         dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
     497       12861 :                         if (vring_mapping_error(vq, addr))
     498           0 :                                 goto unmap_release;
     499             : 
     500       12861 :                         desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
     501       12861 :                         desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
     502       12861 :                         desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
     503       12861 :                         prev = i;
     504       12861 :                         i = virtio16_to_cpu(_vq->vdev, desc[i].next);
     505             :                 }
     506             :         }
     507             :         /* Last one doesn't continue. */
     508        5548 :         desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
     509             : 
     510        5548 :         if (indirect) {
     511             :                 /* Now that the indirect table is filled in, map it. */
     512        3379 :                 dma_addr_t addr = vring_map_single(
     513             :                         vq, desc, total_sg * sizeof(struct vring_desc),
     514             :                         DMA_TO_DEVICE);
     515        3379 :                 if (vring_mapping_error(vq, addr))
     516           0 :                         goto unmap_release;
     517             : 
     518        3379 :                 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
     519             :                                 VRING_DESC_F_INDIRECT);
     520        3379 :                 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
     521             :                                 addr);
     522             : 
     523        3379 :                 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
     524             :                                 total_sg * sizeof(struct vring_desc));
     525             :         }
     526             : 
     527             :         /* We're using some buffers from the free list. */
     528        5548 :         vq->vq.num_free -= descs_used;
     529             : 
     530             :         /* Update free pointer */
     531        5548 :         if (indirect)
     532        3379 :                 vq->free_head = virtio16_to_cpu(_vq->vdev,
     533        3379 :                                         vq->split.vring.desc[head].next);
     534             :         else
     535        2169 :                 vq->free_head = i;
     536             : 
     537             :         /* Store token and indirect buffer state. */
     538        5548 :         vq->split.desc_state[head].data = data;
     539        5548 :         if (indirect)
     540        3379 :                 vq->split.desc_state[head].indir_desc = desc;
     541             :         else
     542        2169 :                 vq->split.desc_state[head].indir_desc = ctx;
     543             : 
     544             :         /* Put entry in available array (but don't update avail->idx until they
     545             :          * do sync). */
     546        5548 :         avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
     547        5548 :         vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
     548             : 
     549             :         /* Descriptors and available array need to be set before we expose the
     550             :          * new available array entries. */
     551        5548 :         virtio_wmb(vq->weak_barriers);
     552        5548 :         vq->split.avail_idx_shadow++;
     553        5548 :         vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
     554             :                                                 vq->split.avail_idx_shadow);
     555        5548 :         vq->num_added++;
     556             : 
     557        5548 :         pr_debug("Added buffer head %i to %p\n", head, vq);
     558        5548 :         END_USE(vq);
     559             : 
     560             :         /* This is very unlikely, but theoretically possible.  Kick
     561             :          * just in case. */
     562        5548 :         if (unlikely(vq->num_added == (1 << 16) - 1))
     563           0 :                 virtqueue_kick(_vq);
     564             : 
     565             :         return 0;
     566             : 
     567           0 : unmap_release:
     568           0 :         err_idx = i;
     569             : 
     570           0 :         if (indirect)
     571             :                 i = 0;
     572             :         else
     573           0 :                 i = head;
     574             : 
     575           0 :         for (n = 0; n < total_sg; n++) {
     576           0 :                 if (i == err_idx)
     577             :                         break;
     578           0 :                 vring_unmap_one_split(vq, &desc[i]);
     579           0 :                 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
     580             :         }
     581             : 
     582           0 :         if (indirect)
     583           0 :                 kfree(desc);
     584             : 
     585             :         END_USE(vq);
     586             :         return -ENOMEM;
     587             : }
     588             : 
     589        3214 : static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
     590             : {
     591        3214 :         struct vring_virtqueue *vq = to_vvq(_vq);
     592        3214 :         u16 new, old;
     593        3214 :         bool needs_kick;
     594             : 
     595        3214 :         START_USE(vq);
     596             :         /* We need to expose available array entries before checking avail
     597             :          * event. */
     598        3214 :         virtio_mb(vq->weak_barriers);
     599             : 
     600        3214 :         old = vq->split.avail_idx_shadow - vq->num_added;
     601        3214 :         new = vq->split.avail_idx_shadow;
     602        3214 :         vq->num_added = 0;
     603             : 
     604        3214 :         LAST_ADD_TIME_CHECK(vq);
     605        3214 :         LAST_ADD_TIME_INVALID(vq);
     606             : 
     607        3214 :         if (vq->event) {
     608        3214 :                 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
     609        3214 :                                         vring_avail_event(&vq->split.vring)),
     610             :                                               new, old);
     611             :         } else {
     612           0 :                 needs_kick = !(vq->split.vring.used->flags &
     613           0 :                                         cpu_to_virtio16(_vq->vdev,
     614             :                                                 VRING_USED_F_NO_NOTIFY));
     615             :         }
     616        3214 :         END_USE(vq);
     617        3214 :         return needs_kick;
     618             : }
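
With VIRTIO_RING_F_EVENT_IDX negotiated (vq->event), the device publishes an avail event index and vring_need_event() (include/uapi/linux/virtio_ring.h) requests a kick only when that index falls inside the window of entries added since the last kick, i.e. old <= event_idx < new in 16-bit arithmetic. A worked example with illustrative values:

        /*
         * vring_need_event(event_idx, new, old) is
         *         (u16)(new - event_idx - 1) < (u16)(new - old)
         *
         * old = 10, new = 12 (two buffers added since the last kick):
         *   event_idx = 10 -> (12-10-1) = 1     < (12-10) = 2 -> kick
         *   event_idx = 11 -> (12-11-1) = 0     < 2           -> kick
         *   event_idx = 12 -> (12-12-1) = 65535, not < 2      -> no kick
         *   event_idx =  9 -> (12-9-1)  = 2,     not < 2      -> no kick
         */
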
     619             : 
     620        4548 : static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
     621             :                              void **ctx)
     622             : {
     623        4548 :         unsigned int i, j;
     624        4548 :         __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
     625             : 
     626             :         /* Clear data ptr. */
     627        4548 :         vq->split.desc_state[head].data = NULL;
     628             : 
     629             :         /* Put back on free list: unmap first-level descriptors and find end */
     630        4548 :         i = head;
     631             : 
     632        4548 :         while (vq->split.vring.desc[i].flags & nextflag) {
     633           0 :                 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
     634           0 :                 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
     635           0 :                 vq->vq.num_free++;
     636             :         }
     637             : 
     638        4548 :         vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
     639        9096 :         vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
     640        4548 :                                                 vq->free_head);
     641        4548 :         vq->free_head = head;
     642             : 
     643             :         /* Plus final descriptor */
     644        4548 :         vq->vq.num_free++;
     645             : 
     646        4548 :         if (vq->indirect) {
     647        3826 :                 struct vring_desc *indir_desc =
     648        3826 :                                 vq->split.desc_state[head].indir_desc;
     649        3826 :                 u32 len;
     650             : 
     651             :                 /* Free the indirect table, if any, now that it's unmapped. */
     652        3826 :                 if (!indir_desc)
     653             :                         return;
     654             : 
     655        6758 :                 len = virtio32_to_cpu(vq->vq.vdev,
     656        3379 :                                 vq->split.vring.desc[head].len);
     657             : 
     658        3379 :                 BUG_ON(!(vq->split.vring.desc[head].flags &
     659             :                          cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
     660        3379 :                 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
     661             : 
     662       20865 :                 for (j = 0; j < len / sizeof(struct vring_desc); j++)
     663       17486 :                         vring_unmap_one_split(vq, &indir_desc[j]);
     664             : 
     665        3379 :                 kfree(indir_desc);
     666        3379 :                 vq->split.desc_state[head].indir_desc = NULL;
     667         722 :         } else if (ctx) {
     668         722 :                 *ctx = vq->split.desc_state[head].indir_desc;
     669             :         }
     670             : }
     671             : 
     672       16899 : static inline bool more_used_split(const struct vring_virtqueue *vq)
     673             : {
     674       33798 :         return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
     675       16899 :                         vq->split.vring.used->idx);
     676             : }
     677             : 
     678       12772 : static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
     679             :                                          unsigned int *len,
     680             :                                          void **ctx)
     681             : {
     682       12772 :         struct vring_virtqueue *vq = to_vvq(_vq);
     683       12772 :         void *ret;
     684       12772 :         unsigned int i;
     685       12772 :         u16 last_used;
     686             : 
     687       12772 :         START_USE(vq);
     688             : 
     689       12772 :         if (unlikely(vq->broken)) {
     690             :                 END_USE(vq);
     691             :                 return NULL;
     692             :         }
     693             : 
     694       12772 :         if (!more_used_split(vq)) {
     695             :                 pr_debug("No more buffers in queue\n");
     696             :                 END_USE(vq);
     697             :                 return NULL;
     698             :         }
     699             : 
     700             :         /* Only get used array entries after they have been exposed by host. */
     701        4548 :         virtio_rmb(vq->weak_barriers);
     702             : 
     703        4548 :         last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
     704        9096 :         i = virtio32_to_cpu(_vq->vdev,
     705        4548 :                         vq->split.vring.used->ring[last_used].id);
     706        9096 :         *len = virtio32_to_cpu(_vq->vdev,
     707        4548 :                         vq->split.vring.used->ring[last_used].len);
     708             : 
     709        4548 :         if (unlikely(i >= vq->split.vring.num)) {
     710           0 :                 BAD_RING(vq, "id %u out of range\n", i);
     711           0 :                 return NULL;
     712             :         }
     713        4548 :         if (unlikely(!vq->split.desc_state[i].data)) {
     714           0 :                 BAD_RING(vq, "id %u is not a head!\n", i);
     715           0 :                 return NULL;
     716             :         }
     717             : 
     718             :         /* detach_buf_split clears data, so grab it now. */
     719        4548 :         ret = vq->split.desc_state[i].data;
     720        4548 :         detach_buf_split(vq, i, ctx);
     721        4548 :         vq->last_used_idx++;
     722             :         /* If we expect an interrupt for the next entry, tell host
     723             :          * by writing event index and flush out the write before
     724             :          * the read in the next get_buf call. */
     725        4548 :         if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
     726           4 :                 virtio_store_mb(vq->weak_barriers,
     727             :                                 &vring_used_event(&vq->split.vring),
     728             :                                 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
     729             : 
     730             :         LAST_ADD_TIME_INVALID(vq);
     731             : 
     732             :         END_USE(vq);
     733             :         return ret;
     734             : }
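
Note that last_used_idx is a free-running 16-bit counter: it is only reduced modulo the ring size when indexing the used ring, via the (num - 1) mask above. For example, assuming a 256-entry ring:

        /* vq->last_used_idx = 260, vq->split.vring.num = 256:
         *   last_used = 260 & 255 = 4  -> used ring slot 4 is consumed,
         * while the full 16-bit value keeps counting and wraps only at 65536.
         */
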
     735             : 
     736        3968 : static void virtqueue_disable_cb_split(struct virtqueue *_vq)
     737             : {
     738        3968 :         struct vring_virtqueue *vq = to_vvq(_vq);
     739             : 
     740        3968 :         if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
     741        3561 :                 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
     742        3561 :                 if (!vq->event)
     743           0 :                         vq->split.vring.avail->flags =
     744           0 :                                 cpu_to_virtio16(_vq->vdev,
     745             :                                                 vq->split.avail_flags_shadow);
     746             :         }
     747        3968 : }
     748             : 
     749        3525 : static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
     750             : {
     751        3525 :         struct vring_virtqueue *vq = to_vvq(_vq);
     752        3525 :         u16 last_used_idx;
     753             : 
     754        3525 :         START_USE(vq);
     755             : 
     756             :         /* We optimistically turn back on interrupts, then check if there was
     757             :          * more to do. */
     758             :         /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
     759             :          * either clear the flags bit or point the event index at the next
     760             :          * entry. Always do both to keep code simple. */
     761        3525 :         if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
     762        3518 :                 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
     763        3518 :                 if (!vq->event)
     764           0 :                         vq->split.vring.avail->flags =
     765           0 :                                 cpu_to_virtio16(_vq->vdev,
     766             :                                                 vq->split.avail_flags_shadow);
     767             :         }
     768        7050 :         vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
     769        3525 :                         last_used_idx = vq->last_used_idx);
     770        3525 :         END_USE(vq);
     771        3525 :         return last_used_idx;
     772             : }
     773             : 
     774        3448 : static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
     775             : {
     776        3448 :         struct vring_virtqueue *vq = to_vvq(_vq);
     777             : 
     778        6896 :         return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
     779        3448 :                         vq->split.vring.used->idx);
     780             : }
     781             : 
     782         447 : static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
     783             : {
     784         447 :         struct vring_virtqueue *vq = to_vvq(_vq);
     785         447 :         u16 bufs;
     786             : 
     787         447 :         START_USE(vq);
     788             : 
     789             :         /* We optimistically turn back on interrupts, then check if there was
     790             :          * more to do. */
      791             :         /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
     792             :          * either clear the flags bit or point the event index at the next
     793             :          * entry. Always update the event index to keep code simple. */
     794         447 :         if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
     795          43 :                 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
     796          43 :                 if (!vq->event)
     797           0 :                         vq->split.vring.avail->flags =
     798           0 :                                 cpu_to_virtio16(_vq->vdev,
     799             :                                                 vq->split.avail_flags_shadow);
     800             :         }
     801             :         /* TODO: tune this threshold */
     802         447 :         bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
     803             : 
     804         447 :         virtio_store_mb(vq->weak_barriers,
     805             :                         &vring_used_event(&vq->split.vring),
     806             :                         cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
     807             : 
     808         447 :         if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
     809             :                                         - vq->last_used_idx) > bufs)) {
     810           0 :                 END_USE(vq);
     811           0 :                 return false;
     812             :         }
     813             : 
     814             :         END_USE(vq);
     815             :         return true;
     816             : }
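
The delayed variant pushes used_event roughly three quarters of the way through the buffers that are currently outstanding, so the device interrupts only after most of them have completed. A worked example with illustrative values:

        /* avail_idx_shadow = 40, last_used_idx = 24:
         *   outstanding = 40 - 24      = 16
         *   bufs        = 16 * 3 / 4   = 12
         *   used_event  = 24 + 12      = 36
         * so the device is asked to interrupt only once it has consumed the
         * buffer at used index 36, unless it has already raced past that
         * point (the check just above returns false in that case).
         */
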
     817             : 
     818           0 : static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
     819             : {
     820           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
     821           0 :         unsigned int i;
     822           0 :         void *buf;
     823             : 
     824           0 :         START_USE(vq);
     825             : 
     826           0 :         for (i = 0; i < vq->split.vring.num; i++) {
     827           0 :                 if (!vq->split.desc_state[i].data)
     828           0 :                         continue;
     829             :                 /* detach_buf_split clears data, so grab it now. */
     830           0 :                 buf = vq->split.desc_state[i].data;
     831           0 :                 detach_buf_split(vq, i, NULL);
     832           0 :                 vq->split.avail_idx_shadow--;
     833           0 :                 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
     834             :                                 vq->split.avail_idx_shadow);
     835           0 :                 END_USE(vq);
     836           0 :                 return buf;
     837             :         }
     838             :         /* That should have freed everything. */
     839           0 :         BUG_ON(vq->vq.num_free != vq->split.vring.num);
     840             : 
     841             :         END_USE(vq);
     842             :         return NULL;
     843             : }
     844             : 
     845           4 : static struct virtqueue *vring_create_virtqueue_split(
     846             :         unsigned int index,
     847             :         unsigned int num,
     848             :         unsigned int vring_align,
     849             :         struct virtio_device *vdev,
     850             :         bool weak_barriers,
     851             :         bool may_reduce_num,
     852             :         bool context,
     853             :         bool (*notify)(struct virtqueue *),
     854             :         void (*callback)(struct virtqueue *),
     855             :         const char *name)
     856             : {
     857           4 :         struct virtqueue *vq;
     858           4 :         void *queue = NULL;
     859           4 :         dma_addr_t dma_addr;
     860           4 :         size_t queue_size_in_bytes;
     861           4 :         struct vring vring;
     862             : 
     863             :         /* We assume num is a power of 2. */
     864           4 :         if (num & (num - 1)) {
     865           0 :                 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
     866           0 :                 return NULL;
     867             :         }
     868             : 
     869             :         /* TODO: allocate each queue chunk individually */
     870           4 :         for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
     871           4 :                 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
     872             :                                           &dma_addr,
     873             :                                           GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
     874           4 :                 if (queue)
     875             :                         break;
     876           0 :                 if (!may_reduce_num)
     877             :                         return NULL;
     878             :         }
     879             : 
     880           4 :         if (!num)
     881             :                 return NULL;
     882             : 
     883           4 :         if (!queue) {
     884             :                 /* Try to get a single page. You are my only hope! */
     885           0 :                 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
     886             :                                           &dma_addr, GFP_KERNEL|__GFP_ZERO);
     887             :         }
     888           4 :         if (!queue)
     889             :                 return NULL;
     890             : 
     891           4 :         queue_size_in_bytes = vring_size(num, vring_align);
     892           4 :         vring_init(&vring, num, queue, vring_align);
     893             : 
     894           4 :         vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
     895             :                                    notify, callback, name);
     896           4 :         if (!vq) {
     897           0 :                 vring_free_queue(vdev, queue_size_in_bytes, queue,
     898             :                                  dma_addr);
     899           0 :                 return NULL;
     900             :         }
     901             : 
     902           4 :         to_vvq(vq)->split.queue_dma_addr = dma_addr;
     903           4 :         to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
     904           4 :         to_vvq(vq)->we_own_ring = true;
     905             : 
     906           4 :         return vq;
     907             : }
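
For reference, vring_size() (include/uapi/linux/virtio_ring.h) is the descriptor table plus avail ring, rounded up to vring_align, plus the used ring. A worked example assuming num = 256, a 4096-byte alignment and 4 KiB pages:

        /* descriptor table: 256 * 16                 = 4096 bytes
         * avail ring:       2 * (3 + 256)            =  518 bytes
         *                   4614, rounded up to 4096 = 8192 bytes
         * used ring:        2 * 3 + 8 * 256          = 2054 bytes
         * vring_size(256, 4096)                      = 10246 bytes (three pages)
         *
         * That is > PAGE_SIZE, so the loop above is entered; num is halved
         * only if the allocation itself fails and may_reduce_num allows it.
         */
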
     908             : 
     909             : 
     910             : /*
     911             :  * Packed ring specific functions - *_packed().
     912             :  */
     913             : 
     914           0 : static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
     915             :                                      struct vring_desc_extra_packed *state)
     916             : {
     917           0 :         u16 flags;
     918             : 
     919           0 :         if (!vq->use_dma_api)
     920             :                 return;
     921             : 
     922           0 :         flags = state->flags;
     923             : 
     924           0 :         if (flags & VRING_DESC_F_INDIRECT) {
     925           0 :                 dma_unmap_single(vring_dma_dev(vq),
     926             :                                  state->addr, state->len,
     927             :                                  (flags & VRING_DESC_F_WRITE) ?
     928             :                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
     929             :         } else {
     930           0 :                 dma_unmap_page(vring_dma_dev(vq),
     931             :                                state->addr, state->len,
     932             :                                (flags & VRING_DESC_F_WRITE) ?
     933             :                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
     934             :         }
     935             : }
     936             : 
     937           0 : static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
     938             :                                    struct vring_packed_desc *desc)
     939             : {
     940           0 :         u16 flags;
     941             : 
     942           0 :         if (!vq->use_dma_api)
     943             :                 return;
     944             : 
     945           0 :         flags = le16_to_cpu(desc->flags);
     946             : 
     947           0 :         if (flags & VRING_DESC_F_INDIRECT) {
     948           0 :                 dma_unmap_single(vring_dma_dev(vq),
     949             :                                  le64_to_cpu(desc->addr),
     950             :                                  le32_to_cpu(desc->len),
     951             :                                  (flags & VRING_DESC_F_WRITE) ?
     952             :                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
     953             :         } else {
     954           0 :                 dma_unmap_page(vring_dma_dev(vq),
     955             :                                le64_to_cpu(desc->addr),
     956             :                                le32_to_cpu(desc->len),
     957             :                                (flags & VRING_DESC_F_WRITE) ?
     958             :                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
     959             :         }
     960             : }
     961             : 
     962           0 : static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
     963             :                                                        gfp_t gfp)
     964             : {
     965           0 :         struct vring_packed_desc *desc;
     966             : 
     967             :         /*
     968             :          * We require lowmem mappings for the descriptors because
     969             :          * otherwise virt_to_phys will give us bogus addresses in the
     970             :          * virtqueue.
     971             :          */
     972           0 :         gfp &= ~__GFP_HIGHMEM;
     973             : 
     974           0 :         desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
     975             : 
     976           0 :         return desc;
     977             : }
     978             : 
     979           0 : static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
     980             :                                        struct scatterlist *sgs[],
     981             :                                        unsigned int total_sg,
     982             :                                        unsigned int out_sgs,
     983             :                                        unsigned int in_sgs,
     984             :                                        void *data,
     985             :                                        gfp_t gfp)
     986             : {
     987           0 :         struct vring_packed_desc *desc;
     988           0 :         struct scatterlist *sg;
     989           0 :         unsigned int i, n, err_idx;
     990           0 :         u16 head, id;
     991           0 :         dma_addr_t addr;
     992             : 
     993           0 :         head = vq->packed.next_avail_idx;
      994           0 :         desc = alloc_indirect_packed(total_sg, gfp);
                      :         if (!desc) {
                      :                 END_USE(vq);
                      :                 return -ENOMEM;
                      :         }
      995             : 
     996           0 :         if (unlikely(vq->vq.num_free < 1)) {
     997           0 :                 pr_debug("Can't add buf len 1 - avail = 0\n");
     998           0 :                 kfree(desc);
     999           0 :                 END_USE(vq);
    1000           0 :                 return -ENOSPC;
    1001             :         }
    1002             : 
    1003           0 :         i = 0;
    1004           0 :         id = vq->free_head;
    1005           0 :         BUG_ON(id == vq->packed.vring.num);
    1006             : 
    1007           0 :         for (n = 0; n < out_sgs + in_sgs; n++) {
    1008           0 :                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
    1009           0 :                         addr = vring_map_one_sg(vq, sg, n < out_sgs ?
    1010             :                                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
    1011           0 :                         if (vring_mapping_error(vq, addr))
    1012           0 :                                 goto unmap_release;
    1013             : 
    1014           0 :                         desc[i].flags = cpu_to_le16(n < out_sgs ?
    1015             :                                                 0 : VRING_DESC_F_WRITE);
    1016           0 :                         desc[i].addr = cpu_to_le64(addr);
    1017           0 :                         desc[i].len = cpu_to_le32(sg->length);
    1018           0 :                         i++;
    1019             :                 }
    1020             :         }
    1021             : 
    1022             :         /* Now that the indirect table is filled in, map it. */
    1023           0 :         addr = vring_map_single(vq, desc,
    1024             :                         total_sg * sizeof(struct vring_packed_desc),
    1025             :                         DMA_TO_DEVICE);
    1026           0 :         if (vring_mapping_error(vq, addr))
    1027           0 :                 goto unmap_release;
    1028             : 
    1029           0 :         vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
    1030           0 :         vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
    1031             :                                 sizeof(struct vring_packed_desc));
    1032           0 :         vq->packed.vring.desc[head].id = cpu_to_le16(id);
    1033             : 
    1034           0 :         if (vq->use_dma_api) {
    1035           0 :                 vq->packed.desc_extra[id].addr = addr;
    1036           0 :                 vq->packed.desc_extra[id].len = total_sg *
    1037             :                                 sizeof(struct vring_packed_desc);
    1038           0 :                 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
    1039           0 :                                                   vq->packed.avail_used_flags;
    1040             :         }
    1041             : 
    1042             :         /*
    1043             :          * A driver MUST NOT make the first descriptor in the list
    1044             :          * available before all subsequent descriptors comprising
    1045             :          * the list are made available.
    1046             :          */
    1047           0 :         virtio_wmb(vq->weak_barriers);
    1048           0 :         vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
    1049             :                                                 vq->packed.avail_used_flags);
    1050             : 
    1051             :         /* We're using some buffers from the free list. */
    1052           0 :         vq->vq.num_free -= 1;
    1053             : 
    1054             :         /* Update free pointer */
    1055           0 :         n = head + 1;
    1056           0 :         if (n >= vq->packed.vring.num) {
    1057           0 :                 n = 0;
    1058           0 :                 vq->packed.avail_wrap_counter ^= 1;
    1059           0 :                 vq->packed.avail_used_flags ^=
    1060             :                                 1 << VRING_PACKED_DESC_F_AVAIL |
    1061             :                                 1 << VRING_PACKED_DESC_F_USED;
    1062             :         }
    1063           0 :         vq->packed.next_avail_idx = n;
    1064           0 :         vq->free_head = vq->packed.desc_state[id].next;
    1065             : 
    1066             :         /* Store token and indirect buffer state. */
    1067           0 :         vq->packed.desc_state[id].num = 1;
    1068           0 :         vq->packed.desc_state[id].data = data;
    1069           0 :         vq->packed.desc_state[id].indir_desc = desc;
    1070           0 :         vq->packed.desc_state[id].last = id;
    1071             : 
    1072           0 :         vq->num_added += 1;
    1073             : 
    1074           0 :         pr_debug("Added buffer head %i to %p\n", head, vq);
    1075           0 :         END_USE(vq);
    1076             : 
    1077           0 :         return 0;
    1078             : 
    1079           0 : unmap_release:
    1080           0 :         err_idx = i;
    1081             : 
    1082           0 :         for (i = 0; i < err_idx; i++)
    1083           0 :                 vring_unmap_desc_packed(vq, &desc[i]);
    1084             : 
    1085           0 :         kfree(desc);
    1086             : 
    1087           0 :         END_USE(vq);
    1088           0 :         return -ENOMEM;
    1089             : }
    1090             : 
    1091           0 : static inline int virtqueue_add_packed(struct virtqueue *_vq,
    1092             :                                        struct scatterlist *sgs[],
    1093             :                                        unsigned int total_sg,
    1094             :                                        unsigned int out_sgs,
    1095             :                                        unsigned int in_sgs,
    1096             :                                        void *data,
    1097             :                                        void *ctx,
    1098             :                                        gfp_t gfp)
    1099             : {
    1100           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1101           0 :         struct vring_packed_desc *desc;
    1102           0 :         struct scatterlist *sg;
    1103           0 :         unsigned int i, n, c, descs_used, err_idx;
    1104           0 :         __le16 head_flags, flags;
    1105           0 :         u16 head, id, prev, curr, avail_used_flags;
    1106             : 
    1107           0 :         START_USE(vq);
    1108             : 
    1109           0 :         BUG_ON(data == NULL);
    1110           0 :         BUG_ON(ctx && vq->indirect);
    1111             : 
    1112           0 :         if (unlikely(vq->broken)) {
    1113             :                 END_USE(vq);
    1114             :                 return -EIO;
    1115             :         }
    1116             : 
    1117           0 :         LAST_ADD_TIME_UPDATE(vq);
    1118             : 
    1119           0 :         BUG_ON(total_sg == 0);
    1120             : 
    1121           0 :         if (virtqueue_use_indirect(_vq, total_sg))
    1122           0 :                 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
    1123             :                                 out_sgs, in_sgs, data, gfp);
    1124             : 
    1125           0 :         head = vq->packed.next_avail_idx;
    1126           0 :         avail_used_flags = vq->packed.avail_used_flags;
    1127             : 
    1128           0 :         WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
    1129             : 
    1130           0 :         desc = vq->packed.vring.desc;
    1131           0 :         i = head;
    1132           0 :         descs_used = total_sg;
    1133             : 
    1134           0 :         if (unlikely(vq->vq.num_free < descs_used)) {
    1135             :                 pr_debug("Can't add buf len %i - avail = %i\n",
    1136             :                          descs_used, vq->vq.num_free);
    1137             :                 END_USE(vq);
    1138             :                 return -ENOSPC;
    1139             :         }
    1140             : 
    1141           0 :         id = vq->free_head;
    1142           0 :         BUG_ON(id == vq->packed.vring.num);
    1143             : 
    1144             :         curr = id;
    1145             :         c = 0;
    1146           0 :         for (n = 0; n < out_sgs + in_sgs; n++) {
    1147           0 :                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
    1148           0 :                         dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
    1149             :                                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
    1150           0 :                         if (vring_mapping_error(vq, addr))
    1151           0 :                                 goto unmap_release;
    1152             : 
    1153           0 :                         flags = cpu_to_le16(vq->packed.avail_used_flags |
    1154             :                                     (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
    1155             :                                     (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
    1156           0 :                         if (i == head)
    1157             :                                 head_flags = flags;
    1158             :                         else
    1159           0 :                                 desc[i].flags = flags;
    1160             : 
    1161           0 :                         desc[i].addr = cpu_to_le64(addr);
    1162           0 :                         desc[i].len = cpu_to_le32(sg->length);
    1163           0 :                         desc[i].id = cpu_to_le16(id);
    1164             : 
    1165           0 :                         if (unlikely(vq->use_dma_api)) {
    1166           0 :                                 vq->packed.desc_extra[curr].addr = addr;
    1167           0 :                                 vq->packed.desc_extra[curr].len = sg->length;
    1168           0 :                                 vq->packed.desc_extra[curr].flags =
    1169             :                                         le16_to_cpu(flags);
    1170             :                         }
    1171           0 :                         prev = curr;
    1172           0 :                         curr = vq->packed.desc_state[curr].next;
    1173             : 
     1174           0 :                         if (unlikely(++i >= vq->packed.vring.num)) {
    1175           0 :                                 i = 0;
    1176           0 :                                 vq->packed.avail_used_flags ^=
    1177             :                                         1 << VRING_PACKED_DESC_F_AVAIL |
    1178             :                                         1 << VRING_PACKED_DESC_F_USED;
    1179             :                         }
    1180             :                 }
    1181             :         }
    1182             : 
    1183           0 :         if (i < head)
    1184           0 :                 vq->packed.avail_wrap_counter ^= 1;
    1185             : 
    1186             :         /* We're using some buffers from the free list. */
    1187           0 :         vq->vq.num_free -= descs_used;
    1188             : 
    1189             :         /* Update free pointer */
    1190           0 :         vq->packed.next_avail_idx = i;
    1191           0 :         vq->free_head = curr;
    1192             : 
    1193             :         /* Store token. */
    1194           0 :         vq->packed.desc_state[id].num = descs_used;
    1195           0 :         vq->packed.desc_state[id].data = data;
    1196           0 :         vq->packed.desc_state[id].indir_desc = ctx;
    1197           0 :         vq->packed.desc_state[id].last = prev;
    1198             : 
    1199             :         /*
    1200             :          * A driver MUST NOT make the first descriptor in the list
    1201             :          * available before all subsequent descriptors comprising
    1202             :          * the list are made available.
    1203             :          */
    1204           0 :         virtio_wmb(vq->weak_barriers);
    1205           0 :         vq->packed.vring.desc[head].flags = head_flags;
    1206           0 :         vq->num_added += descs_used;
    1207             : 
    1208           0 :         pr_debug("Added buffer head %i to %p\n", head, vq);
    1209           0 :         END_USE(vq);
    1210             : 
    1211           0 :         return 0;
    1212             : 
    1213           0 : unmap_release:
    1214           0 :         err_idx = i;
    1215           0 :         i = head;
    1216             : 
    1217           0 :         vq->packed.avail_used_flags = avail_used_flags;
    1218             : 
    1219           0 :         for (n = 0; n < total_sg; n++) {
    1220           0 :                 if (i == err_idx)
    1221             :                         break;
    1222           0 :                 vring_unmap_desc_packed(vq, &desc[i]);
    1223           0 :                 i++;
    1224           0 :                 if (i >= vq->packed.vring.num)
    1225           0 :                         i = 0;
    1226             :         }
    1227             : 
    1228             :         END_USE(vq);
    1229             :         return -EIO;
    1230             : }
    1231             : 
    1232           0 : static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
    1233             : {
    1234           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1235           0 :         u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    1236           0 :         bool needs_kick;
    1237           0 :         union {
    1238             :                 struct {
    1239             :                         __le16 off_wrap;
    1240             :                         __le16 flags;
    1241             :                 };
    1242             :                 u32 u32;
    1243             :         } snapshot;
    1244             : 
    1245           0 :         START_USE(vq);
    1246             : 
    1247             :         /*
    1248             :          * We need to expose the new flags value before checking notification
    1249             :          * suppressions.
    1250             :          */
    1251           0 :         virtio_mb(vq->weak_barriers);
    1252             : 
    1253           0 :         old = vq->packed.next_avail_idx - vq->num_added;
    1254           0 :         new = vq->packed.next_avail_idx;
    1255           0 :         vq->num_added = 0;
    1256             : 
    1257           0 :         snapshot.u32 = *(u32 *)vq->packed.vring.device;
    1258           0 :         flags = le16_to_cpu(snapshot.flags);
    1259             : 
    1260           0 :         LAST_ADD_TIME_CHECK(vq);
    1261           0 :         LAST_ADD_TIME_INVALID(vq);
    1262             : 
    1263           0 :         if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
    1264           0 :                 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
    1265           0 :                 goto out;
    1266             :         }
    1267             : 
    1268           0 :         off_wrap = le16_to_cpu(snapshot.off_wrap);
    1269             : 
    1270           0 :         wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    1271           0 :         event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    1272           0 :         if (wrap_counter != vq->packed.avail_wrap_counter)
    1273           0 :                 event_idx -= vq->packed.vring.num;
    1274             : 
    1275           0 :         needs_kick = vring_need_event(event_idx, new, old);
    1276           0 : out:
    1277           0 :         END_USE(vq);
    1278           0 :         return needs_kick;
    1279             : }
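/*
 * Editorial note, not part of the instrumented source: a short worked example
 * of the off_wrap decoding above, as this editor reads the virtio 1.1
 * packed-ring layout.  The device publishes a 16-bit value in which bit
 * VRING_PACKED_EVENT_F_WRAP_CTR (bit 15) carries its wrap counter and the low
 * 15 bits carry the event offset.  If the device stores
 * off_wrap = (1 << 15) | 7, the code above recovers wrap_counter = 1 and
 * event_idx = 7.  When that wrap counter differs from the driver's
 * avail_wrap_counter, the offset refers to the previous lap of the ring, so
 * event_idx is rebased by subtracting vring.num before it is handed to
 * vring_need_event().
 */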
    1280             : 
    1281           0 : static void detach_buf_packed(struct vring_virtqueue *vq,
    1282             :                               unsigned int id, void **ctx)
    1283             : {
    1284           0 :         struct vring_desc_state_packed *state = NULL;
    1285           0 :         struct vring_packed_desc *desc;
    1286           0 :         unsigned int i, curr;
    1287             : 
    1288           0 :         state = &vq->packed.desc_state[id];
    1289             : 
    1290             :         /* Clear data ptr. */
    1291           0 :         state->data = NULL;
    1292             : 
    1293           0 :         vq->packed.desc_state[state->last].next = vq->free_head;
    1294           0 :         vq->free_head = id;
    1295           0 :         vq->vq.num_free += state->num;
    1296             : 
    1297           0 :         if (unlikely(vq->use_dma_api)) {
    1298             :                 curr = id;
    1299           0 :                 for (i = 0; i < state->num; i++) {
    1300           0 :                         vring_unmap_state_packed(vq,
    1301           0 :                                 &vq->packed.desc_extra[curr]);
    1302           0 :                         curr = vq->packed.desc_state[curr].next;
    1303             :                 }
    1304             :         }
    1305             : 
    1306           0 :         if (vq->indirect) {
    1307           0 :                 u32 len;
    1308             : 
    1309             :                 /* Free the indirect table, if any, now that it's unmapped. */
    1310           0 :                 desc = state->indir_desc;
    1311           0 :                 if (!desc)
    1312             :                         return;
    1313             : 
    1314           0 :                 if (vq->use_dma_api) {
    1315           0 :                         len = vq->packed.desc_extra[id].len;
    1316           0 :                         for (i = 0; i < len / sizeof(struct vring_packed_desc);
    1317           0 :                                         i++)
    1318           0 :                                 vring_unmap_desc_packed(vq, &desc[i]);
    1319             :                 }
    1320           0 :                 kfree(desc);
    1321           0 :                 state->indir_desc = NULL;
    1322           0 :         } else if (ctx) {
    1323           0 :                 *ctx = state->indir_desc;
    1324             :         }
    1325             : }
    1326             : 
    1327           0 : static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
    1328             :                                        u16 idx, bool used_wrap_counter)
    1329             : {
    1330           0 :         bool avail, used;
    1331           0 :         u16 flags;
    1332             : 
    1333           0 :         flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
    1334           0 :         avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    1335           0 :         used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    1336             : 
    1337           0 :         return avail == used && used == used_wrap_counter;
    1338             : }
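/*
 * Editorial note, not part of the instrumented source: a worked example of
 * the test above.  With used_wrap_counter == 1, a descriptor whose flags
 * carry AVAIL=1/USED=1 has been consumed by the device, while AVAIL=1/USED=0
 * is still merely available.  Once last_used_idx wraps past the end of the
 * ring the counter flips to 0, and a consumed descriptor on that lap is the
 * one with AVAIL=0/USED=0.  "avail == used and used == wrap counter" is
 * therefore a single test that works on either lap.
 */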
    1339             : 
    1340           0 : static inline bool more_used_packed(const struct vring_virtqueue *vq)
    1341             : {
    1342           0 :         return is_used_desc_packed(vq, vq->last_used_idx,
    1343           0 :                         vq->packed.used_wrap_counter);
    1344             : }
    1345             : 
    1346           0 : static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
    1347             :                                           unsigned int *len,
    1348             :                                           void **ctx)
    1349             : {
    1350           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1351           0 :         u16 last_used, id;
    1352           0 :         void *ret;
    1353             : 
    1354           0 :         START_USE(vq);
    1355             : 
    1356           0 :         if (unlikely(vq->broken)) {
    1357             :                 END_USE(vq);
    1358             :                 return NULL;
    1359             :         }
    1360             : 
    1361           0 :         if (!more_used_packed(vq)) {
    1362             :                 pr_debug("No more buffers in queue\n");
    1363             :                 END_USE(vq);
    1364             :                 return NULL;
    1365             :         }
    1366             : 
    1367             :         /* Only get used elements after they have been exposed by host. */
    1368           0 :         virtio_rmb(vq->weak_barriers);
    1369             : 
    1370           0 :         last_used = vq->last_used_idx;
    1371           0 :         id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
    1372           0 :         *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
    1373             : 
    1374           0 :         if (unlikely(id >= vq->packed.vring.num)) {
    1375           0 :                 BAD_RING(vq, "id %u out of range\n", id);
    1376           0 :                 return NULL;
    1377             :         }
    1378           0 :         if (unlikely(!vq->packed.desc_state[id].data)) {
    1379           0 :                 BAD_RING(vq, "id %u is not a head!\n", id);
    1380           0 :                 return NULL;
    1381             :         }
    1382             : 
    1383             :         /* detach_buf_packed clears data, so grab it now. */
    1384           0 :         ret = vq->packed.desc_state[id].data;
    1385           0 :         detach_buf_packed(vq, id, ctx);
    1386             : 
    1387           0 :         vq->last_used_idx += vq->packed.desc_state[id].num;
    1388           0 :         if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
    1389           0 :                 vq->last_used_idx -= vq->packed.vring.num;
    1390           0 :                 vq->packed.used_wrap_counter ^= 1;
    1391             :         }
    1392             : 
    1393             :         /*
    1394             :          * If we expect an interrupt for the next entry, tell host
    1395             :          * by writing event index and flush out the write before
    1396             :          * the read in the next get_buf call.
    1397             :          */
    1398           0 :         if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
    1399           0 :                 virtio_store_mb(vq->weak_barriers,
    1400             :                                 &vq->packed.vring.driver->off_wrap,
    1401             :                                 cpu_to_le16(vq->last_used_idx |
    1402             :                                         (vq->packed.used_wrap_counter <<
    1403             :                                          VRING_PACKED_EVENT_F_WRAP_CTR)));
    1404             : 
    1405             :         LAST_ADD_TIME_INVALID(vq);
    1406             : 
    1407             :         END_USE(vq);
    1408             :         return ret;
    1409             : }
    1410             : 
    1411           0 : static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
    1412             : {
    1413           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1414             : 
    1415           0 :         if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
    1416           0 :                 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
    1417           0 :                 vq->packed.vring.driver->flags =
    1418             :                         cpu_to_le16(vq->packed.event_flags_shadow);
    1419             :         }
    1420             : }
    1421             : 
    1422           0 : static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
    1423             : {
    1424           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1425             : 
    1426           0 :         START_USE(vq);
    1427             : 
    1428             :         /*
    1429             :          * We optimistically turn back on interrupts, then check if there was
    1430             :          * more to do.
    1431             :          */
    1432             : 
    1433           0 :         if (vq->event) {
    1434           0 :                 vq->packed.vring.driver->off_wrap =
    1435           0 :                         cpu_to_le16(vq->last_used_idx |
    1436             :                                 (vq->packed.used_wrap_counter <<
    1437             :                                  VRING_PACKED_EVENT_F_WRAP_CTR));
    1438             :                 /*
    1439             :                  * We need to update event offset and event wrap
    1440             :                  * counter first before updating event flags.
    1441             :                  */
    1442           0 :                 virtio_wmb(vq->weak_barriers);
    1443             :         }
    1444             : 
    1445           0 :         if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
    1446           0 :                 vq->packed.event_flags_shadow = vq->event ?
    1447             :                                 VRING_PACKED_EVENT_FLAG_DESC :
    1448             :                                 VRING_PACKED_EVENT_FLAG_ENABLE;
    1449           0 :                 vq->packed.vring.driver->flags =
    1450             :                                 cpu_to_le16(vq->packed.event_flags_shadow);
    1451             :         }
    1452             : 
    1453           0 :         END_USE(vq);
    1454           0 :         return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
    1455             :                         VRING_PACKED_EVENT_F_WRAP_CTR);
    1456             : }
    1457             : 
    1458           0 : static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
    1459             : {
    1460           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1461           0 :         bool wrap_counter;
    1462           0 :         u16 used_idx;
    1463             : 
    1464           0 :         wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    1465           0 :         used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    1466             : 
    1467           0 :         return is_used_desc_packed(vq, used_idx, wrap_counter);
    1468             : }
    1469             : 
    1470           0 : static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
    1471             : {
    1472           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1473           0 :         u16 used_idx, wrap_counter;
    1474           0 :         u16 bufs;
    1475             : 
    1476           0 :         START_USE(vq);
    1477             : 
    1478             :         /*
    1479             :          * We optimistically turn back on interrupts, then check if there was
    1480             :          * more to do.
    1481             :          */
    1482             : 
    1483           0 :         if (vq->event) {
    1484             :                 /* TODO: tune this threshold */
    1485           0 :                 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
    1486           0 :                 wrap_counter = vq->packed.used_wrap_counter;
    1487             : 
    1488           0 :                 used_idx = vq->last_used_idx + bufs;
    1489           0 :                 if (used_idx >= vq->packed.vring.num) {
    1490           0 :                         used_idx -= vq->packed.vring.num;
    1491           0 :                         wrap_counter ^= 1;
    1492             :                 }
    1493             : 
    1494           0 :                 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
    1495             :                         (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
    1496             : 
    1497             :                 /*
    1498             :                  * We need to update event offset and event wrap
    1499             :                  * counter first before updating event flags.
    1500             :                  */
    1501           0 :                 virtio_wmb(vq->weak_barriers);
    1502             :         }
    1503             : 
    1504           0 :         if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
    1505           0 :                 vq->packed.event_flags_shadow = vq->event ?
    1506             :                                 VRING_PACKED_EVENT_FLAG_DESC :
    1507             :                                 VRING_PACKED_EVENT_FLAG_ENABLE;
    1508           0 :                 vq->packed.vring.driver->flags =
    1509             :                                 cpu_to_le16(vq->packed.event_flags_shadow);
    1510             :         }
    1511             : 
    1512             :         /*
    1513             :          * We need to update event suppression structure first
    1514             :          * before re-checking for more used buffers.
    1515             :          */
    1516           0 :         virtio_mb(vq->weak_barriers);
    1517             : 
    1518           0 :         if (is_used_desc_packed(vq,
    1519           0 :                                 vq->last_used_idx,
    1520           0 :                                 vq->packed.used_wrap_counter)) {
    1521           0 :                 END_USE(vq);
    1522           0 :                 return false;
    1523             :         }
    1524             : 
    1525             :         END_USE(vq);
    1526             :         return true;
    1527             : }
    1528             : 
    1529           0 : static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
    1530             : {
    1531           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1532           0 :         unsigned int i;
    1533           0 :         void *buf;
    1534             : 
    1535           0 :         START_USE(vq);
    1536             : 
    1537           0 :         for (i = 0; i < vq->packed.vring.num; i++) {
    1538           0 :                 if (!vq->packed.desc_state[i].data)
    1539           0 :                         continue;
    1540             :                 /* detach_buf clears data, so grab it now. */
    1541           0 :                 buf = vq->packed.desc_state[i].data;
    1542           0 :                 detach_buf_packed(vq, i, NULL);
    1543           0 :                 END_USE(vq);
    1544           0 :                 return buf;
    1545             :         }
    1546             :         /* That should have freed everything. */
    1547           0 :         BUG_ON(vq->vq.num_free != vq->packed.vring.num);
    1548             : 
    1549             :         END_USE(vq);
    1550             :         return NULL;
    1551             : }
    1552             : 
    1553           0 : static struct virtqueue *vring_create_virtqueue_packed(
    1554             :         unsigned int index,
    1555             :         unsigned int num,
    1556             :         unsigned int vring_align,
    1557             :         struct virtio_device *vdev,
    1558             :         bool weak_barriers,
    1559             :         bool may_reduce_num,
    1560             :         bool context,
    1561             :         bool (*notify)(struct virtqueue *),
    1562             :         void (*callback)(struct virtqueue *),
    1563             :         const char *name)
    1564             : {
    1565           0 :         struct vring_virtqueue *vq;
    1566           0 :         struct vring_packed_desc *ring;
    1567           0 :         struct vring_packed_desc_event *driver, *device;
    1568           0 :         dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
    1569           0 :         size_t ring_size_in_bytes, event_size_in_bytes;
    1570           0 :         unsigned int i;
    1571             : 
    1572           0 :         ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
    1573             : 
    1574           0 :         ring = vring_alloc_queue(vdev, ring_size_in_bytes,
    1575             :                                  &ring_dma_addr,
    1576             :                                  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
    1577           0 :         if (!ring)
    1578           0 :                 goto err_ring;
    1579             : 
    1580           0 :         event_size_in_bytes = sizeof(struct vring_packed_desc_event);
    1581             : 
    1582           0 :         driver = vring_alloc_queue(vdev, event_size_in_bytes,
    1583             :                                    &driver_event_dma_addr,
    1584             :                                    GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
    1585           0 :         if (!driver)
    1586           0 :                 goto err_driver;
    1587             : 
    1588           0 :         device = vring_alloc_queue(vdev, event_size_in_bytes,
    1589             :                                    &device_event_dma_addr,
    1590             :                                    GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
    1591           0 :         if (!device)
    1592           0 :                 goto err_device;
    1593             : 
    1594           0 :         vq = kmalloc(sizeof(*vq), GFP_KERNEL);
    1595           0 :         if (!vq)
    1596           0 :                 goto err_vq;
    1597             : 
    1598           0 :         vq->vq.callback = callback;
    1599           0 :         vq->vq.vdev = vdev;
    1600           0 :         vq->vq.name = name;
    1601           0 :         vq->vq.num_free = num;
    1602           0 :         vq->vq.index = index;
    1603           0 :         vq->we_own_ring = true;
    1604           0 :         vq->notify = notify;
    1605           0 :         vq->weak_barriers = weak_barriers;
    1606           0 :         vq->broken = false;
    1607           0 :         vq->last_used_idx = 0;
    1608           0 :         vq->num_added = 0;
    1609           0 :         vq->packed_ring = true;
    1610           0 :         vq->use_dma_api = vring_use_dma_api(vdev);
    1611             : #ifdef DEBUG
    1612             :         vq->in_use = false;
    1613             :         vq->last_add_time_valid = false;
    1614             : #endif
    1615             : 
    1616           0 :         vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
    1617             :                 !context;
    1618           0 :         vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
    1619             : 
    1620           0 :         if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
    1621           0 :                 vq->weak_barriers = false;
    1622             : 
    1623           0 :         vq->packed.ring_dma_addr = ring_dma_addr;
    1624           0 :         vq->packed.driver_event_dma_addr = driver_event_dma_addr;
    1625           0 :         vq->packed.device_event_dma_addr = device_event_dma_addr;
    1626             : 
    1627           0 :         vq->packed.ring_size_in_bytes = ring_size_in_bytes;
    1628           0 :         vq->packed.event_size_in_bytes = event_size_in_bytes;
    1629             : 
    1630           0 :         vq->packed.vring.num = num;
    1631           0 :         vq->packed.vring.desc = ring;
    1632           0 :         vq->packed.vring.driver = driver;
    1633           0 :         vq->packed.vring.device = device;
    1634             : 
    1635           0 :         vq->packed.next_avail_idx = 0;
    1636           0 :         vq->packed.avail_wrap_counter = 1;
    1637           0 :         vq->packed.used_wrap_counter = 1;
    1638           0 :         vq->packed.event_flags_shadow = 0;
    1639           0 :         vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    1640             : 
    1641           0 :         vq->packed.desc_state = kmalloc_array(num,
    1642             :                         sizeof(struct vring_desc_state_packed),
    1643             :                         GFP_KERNEL);
    1644           0 :         if (!vq->packed.desc_state)
    1645           0 :                 goto err_desc_state;
    1646             : 
    1647           0 :         memset(vq->packed.desc_state, 0,
    1648             :                 num * sizeof(struct vring_desc_state_packed));
    1649             : 
    1650             :         /* Put everything in free lists. */
    1651           0 :         vq->free_head = 0;
    1652           0 :         for (i = 0; i < num-1; i++)
    1653           0 :                 vq->packed.desc_state[i].next = i + 1;
    1654             : 
    1655           0 :         vq->packed.desc_extra = kmalloc_array(num,
    1656             :                         sizeof(struct vring_desc_extra_packed),
    1657             :                         GFP_KERNEL);
    1658           0 :         if (!vq->packed.desc_extra)
    1659           0 :                 goto err_desc_extra;
    1660             : 
    1661           0 :         memset(vq->packed.desc_extra, 0,
    1662             :                 num * sizeof(struct vring_desc_extra_packed));
    1663             : 
    1664             :         /* No callback?  Tell other side not to bother us. */
    1665           0 :         if (!callback) {
    1666           0 :                 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
    1667           0 :                 vq->packed.vring.driver->flags =
    1668             :                         cpu_to_le16(vq->packed.event_flags_shadow);
    1669             :         }
    1670             : 
    1671           0 :         list_add_tail(&vq->vq.list, &vdev->vqs);
    1672           0 :         return &vq->vq;
    1673             : 
    1674           0 : err_desc_extra:
    1675           0 :         kfree(vq->packed.desc_state);
    1676           0 : err_desc_state:
    1677           0 :         kfree(vq);
    1678           0 : err_vq:
    1679           0 :         vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
    1680           0 : err_device:
    1681           0 :         vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
    1682           0 : err_driver:
    1683           0 :         vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
    1684             : err_ring:
    1685             :         return NULL;
    1686             : }
    1687             : 
    1688             : 
    1689             : /*
    1690             :  * Generic functions and exported symbols.
    1691             :  */
    1692             : 
    1693        5548 : static inline int virtqueue_add(struct virtqueue *_vq,
    1694             :                                 struct scatterlist *sgs[],
    1695             :                                 unsigned int total_sg,
    1696             :                                 unsigned int out_sgs,
    1697             :                                 unsigned int in_sgs,
    1698             :                                 void *data,
    1699             :                                 void *ctx,
    1700             :                                 gfp_t gfp)
    1701             : {
    1702        5548 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1703             : 
    1704           0 :         return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
    1705        5548 :                                         out_sgs, in_sgs, data, ctx, gfp) :
    1706        5548 :                                  virtqueue_add_split(_vq, sgs, total_sg,
    1707             :                                         out_sgs, in_sgs, data, ctx, gfp);
    1708             : }
    1709             : 
    1710             : /**
    1711             :  * virtqueue_add_sgs - expose buffers to other end
    1712             :  * @_vq: the struct virtqueue we're talking about.
    1713             :  * @sgs: array of terminated scatterlists.
    1714             :  * @out_sgs: the number of scatterlists readable by other side
    1715             :  * @in_sgs: the number of scatterlists which are writable (after readable ones)
    1716             :  * @data: the token identifying the buffer.
    1717             :  * @gfp: how to do memory allocations (if necessary).
    1718             :  *
    1719             :  * Caller must ensure we don't call this with other virtqueue operations
    1720             :  * at the same time (except where noted).
    1721             :  *
     1722             :  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
    1723             :  */
    1724        3379 : int virtqueue_add_sgs(struct virtqueue *_vq,
    1725             :                       struct scatterlist *sgs[],
    1726             :                       unsigned int out_sgs,
    1727             :                       unsigned int in_sgs,
    1728             :                       void *data,
    1729             :                       gfp_t gfp)
    1730             : {
    1731        3379 :         unsigned int i, total_sg = 0;
    1732             : 
    1733             :         /* Count them first. */
    1734       13325 :         for (i = 0; i < out_sgs + in_sgs; i++) {
    1735        9946 :                 struct scatterlist *sg;
    1736             : 
    1737       27432 :                 for (sg = sgs[i]; sg; sg = sg_next(sg))
    1738       17486 :                         total_sg++;
    1739             :         }
    1740        3379 :         return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
    1741             :                              data, NULL, gfp);
    1742             : }
    1743             : EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
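/*
 * Editorial sketch, not part of the instrumented source: a minimal,
 * hypothetical caller of virtqueue_add_sgs().  The my_req layout, the
 * my_dev_submit() name and the GFP_ATOMIC choice are illustrative
 * assumptions; only the scatterlist and virtqueue calls are real APIs.
 */
struct my_req {                         /* hypothetical request layout */
        __le32 hdr[4];                  /* device-readable header */
        u8 status;                      /* device-writable status byte */
};

static int my_dev_submit(struct virtqueue *vq, struct my_req *req)
{
        struct scatterlist hdr, status, *sgs[2];

        /* One device-readable segment followed by one device-writable one. */
        sg_init_one(&hdr, req->hdr, sizeof(req->hdr));
        sg_init_one(&status, &req->status, sizeof(req->status));
        sgs[0] = &hdr;
        sgs[1] = &status;

        /* @req doubles as the token handed back later by virtqueue_get_buf(). */
        return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}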
    1744             : 
    1745             : /**
    1746             :  * virtqueue_add_outbuf - expose output buffers to other end
    1747             :  * @vq: the struct virtqueue we're talking about.
    1748             :  * @sg: scatterlist (must be well-formed and terminated!)
    1749             :  * @num: the number of entries in @sg readable by other side
    1750             :  * @data: the token identifying the buffer.
    1751             :  * @gfp: how to do memory allocations (if necessary).
    1752             :  *
    1753             :  * Caller must ensure we don't call this with other virtqueue operations
    1754             :  * at the same time (except where noted).
    1755             :  *
     1756             :  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
    1757             :  */
    1758         447 : int virtqueue_add_outbuf(struct virtqueue *vq,
    1759             :                          struct scatterlist *sg, unsigned int num,
    1760             :                          void *data,
    1761             :                          gfp_t gfp)
    1762             : {
    1763         447 :         return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
    1764             : }
    1765             : EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
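/*
 * Editorial sketch, not part of the instrumented source: queueing one
 * device-readable buffer and kicking the device.  @buf is assumed to be
 * DMA-capable (kmalloc'd, not on the stack) and owned by the caller until the
 * token comes back from virtqueue_get_buf(); my_dev_send() is a hypothetical
 * name.
 */
static int my_dev_send(struct virtqueue *vq, void *buf, unsigned int len)
{
        struct scatterlist sg;
        int err;

        sg_init_one(&sg, buf, len);
        err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
        if (err)
                return err;

        /* virtqueue_kick() only notifies if the device has not suppressed it. */
        virtqueue_kick(vq);
        return 0;
}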
    1766             : 
    1767             : /**
    1768             :  * virtqueue_add_inbuf - expose input buffers to other end
    1769             :  * @vq: the struct virtqueue we're talking about.
    1770             :  * @sg: scatterlist (must be well-formed and terminated!)
    1771             :  * @num: the number of entries in @sg writable by other side
    1772             :  * @data: the token identifying the buffer.
    1773             :  * @gfp: how to do memory allocations (if necessary).
    1774             :  *
    1775             :  * Caller must ensure we don't call this with other virtqueue operations
    1776             :  * at the same time (except where noted).
    1777             :  *
     1778             :  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
    1779             :  */
    1780           0 : int virtqueue_add_inbuf(struct virtqueue *vq,
    1781             :                         struct scatterlist *sg, unsigned int num,
    1782             :                         void *data,
    1783             :                         gfp_t gfp)
    1784             : {
    1785           0 :         return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
    1786             : }
    1787             : EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
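/*
 * Editorial sketch, not part of the instrumented source: the usual
 * receive-refill pattern, posting device-writable buffers until the ring
 * reports -ENOSPC.  MY_RX_BUF_LEN and my_dev_fill_rx() are illustrative
 * assumptions.
 */
#define MY_RX_BUF_LEN 256               /* illustrative buffer size */

static void my_dev_fill_rx(struct virtqueue *vq)
{
        struct scatterlist sg;
        void *buf;
        int err;

        for (;;) {
                buf = kmalloc(MY_RX_BUF_LEN, GFP_KERNEL);
                if (!buf)
                        break;
                sg_init_one(&sg, buf, MY_RX_BUF_LEN);
                err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
                if (err) {              /* usually -ENOSPC once the ring is full */
                        kfree(buf);
                        break;
                }
        }
        virtqueue_kick(vq);
}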
    1788             : 
    1789             : /**
    1790             :  * virtqueue_add_inbuf_ctx - expose input buffers to other end
    1791             :  * @vq: the struct virtqueue we're talking about.
    1792             :  * @sg: scatterlist (must be well-formed and terminated!)
    1793             :  * @num: the number of entries in @sg writable by other side
    1794             :  * @data: the token identifying the buffer.
    1795             :  * @ctx: extra context for the token
    1796             :  * @gfp: how to do memory allocations (if necessary).
    1797             :  *
    1798             :  * Caller must ensure we don't call this with other virtqueue operations
    1799             :  * at the same time (except where noted).
    1800             :  *
     1801             :  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
    1802             :  */
    1803        1722 : int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
    1804             :                         struct scatterlist *sg, unsigned int num,
    1805             :                         void *data,
    1806             :                         void *ctx,
    1807             :                         gfp_t gfp)
    1808             : {
    1809        1722 :         return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
    1810             : }
    1811             : EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
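/*
 * Editorial sketch, not part of the instrumented source: @ctx rides along
 * with the buffer and is handed back by virtqueue_get_buf_ctx(), which is
 * handy when the token is the buffer itself and one more piece of per-buffer
 * state is needed.  This assumes the queue was created with per-buffer
 * context enabled (which disables indirect descriptors); my_dev_post_rx()
 * and the length-in-pointer trick are illustrative.
 */
static int my_dev_post_rx(struct virtqueue *vq, void *buf,
                          unsigned long alloc_len)
{
        struct scatterlist sg;

        sg_init_one(&sg, buf, alloc_len);
        /* Smuggle the allocated length through the opaque ctx pointer. */
        return virtqueue_add_inbuf_ctx(vq, &sg, 1, buf,
                                       (void *)alloc_len, GFP_ATOMIC);
}

/*
 * On completion the pair comes back together:
 *      buf = virtqueue_get_buf_ctx(vq, &len, &ctx);
 *      alloc_len = (unsigned long)ctx;
 */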
    1812             : 
    1813             : /**
    1814             :  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
    1815             :  * @_vq: the struct virtqueue
    1816             :  *
    1817             :  * Instead of virtqueue_kick(), you can do:
    1818             :  *      if (virtqueue_kick_prepare(vq))
    1819             :  *              virtqueue_notify(vq);
    1820             :  *
     1821             :  * This is sometimes useful because virtqueue_kick_prepare() needs
     1822             :  * to be serialized, but the actual virtqueue_notify() call does not.
    1823             :  */
    1824        3214 : bool virtqueue_kick_prepare(struct virtqueue *_vq)
    1825             : {
    1826        3214 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1827             : 
    1828        3214 :         return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
    1829        3214 :                                  virtqueue_kick_prepare_split(_vq);
    1830             : }
    1831             : EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
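/*
 * Editorial sketch, not part of the instrumented source: why the split kick
 * exists.  virtqueue_kick_prepare() must run under the same lock that
 * serializes the virtqueue_add_*() calls, but the (potentially slow,
 * trapping) notification can be issued after the lock is dropped.  The
 * my_dev structure, its lock and my_dev_send_locked() are illustrative
 * assumptions.
 */
struct my_dev {                         /* hypothetical per-device state */
        struct virtqueue *vq;
        spinlock_t vq_lock;
};

static int my_dev_send_locked(struct my_dev *mydev, struct scatterlist *sg,
                              void *token)
{
        unsigned long flags;
        bool kick;
        int err;

        spin_lock_irqsave(&mydev->vq_lock, flags);
        err = virtqueue_add_outbuf(mydev->vq, sg, 1, token, GFP_ATOMIC);
        kick = virtqueue_kick_prepare(mydev->vq);
        spin_unlock_irqrestore(&mydev->vq_lock, flags);

        if (!err && kick)
                virtqueue_notify(mydev->vq);    /* needs no serialization */
        return err;
}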
    1832             : 
    1833             : /**
    1834             :  * virtqueue_notify - second half of split virtqueue_kick call.
    1835             :  * @_vq: the struct virtqueue
    1836             :  *
    1837             :  * This does not need to be serialized.
    1838             :  *
    1839             :  * Returns false if host notify failed or queue is broken, otherwise true.
    1840             :  */
    1841        3185 : bool virtqueue_notify(struct virtqueue *_vq)
    1842             : {
    1843        3185 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1844             : 
    1845        3185 :         if (unlikely(vq->broken))
    1846             :                 return false;
    1847             : 
    1848             :         /* Prod other side to tell it about changes. */
    1849        3185 :         if (!vq->notify(_vq)) {
    1850           0 :                 vq->broken = true;
    1851           0 :                 return false;
    1852             :         }
    1853             :         return true;
    1854             : }
    1855             : EXPORT_SYMBOL_GPL(virtqueue_notify);
    1856             : 
    1857             : /**
    1858             :  * virtqueue_kick - update after add_buf
    1859             :  * @vq: the struct virtqueue
    1860             :  *
    1861             :  * After one or more virtqueue_add_* calls, invoke this to kick
    1862             :  * the other side.
    1863             :  *
    1864             :  * Caller must ensure we don't call this with other virtqueue
    1865             :  * operations at the same time (except where noted).
    1866             :  *
    1867             :  * Returns false if kick failed, otherwise true.
    1868             :  */
    1869           6 : bool virtqueue_kick(struct virtqueue *vq)
    1870             : {
    1871           6 :         if (virtqueue_kick_prepare(vq))
    1872           6 :                 return virtqueue_notify(vq);
    1873             :         return true;
    1874             : }
    1875             : EXPORT_SYMBOL_GPL(virtqueue_kick);
    1876             : 
    1877             : /**
    1878             :  * virtqueue_get_buf - get the next used buffer
    1879             :  * @_vq: the struct virtqueue we're talking about.
    1880             :  * @len: the length written into the buffer
    1881             :  * @ctx: extra context for the token
    1882             :  *
    1883             :  * If the device wrote data into the buffer, @len will be set to the
    1884             :  * amount written.  This means you don't need to clear the buffer
    1885             :  * beforehand to ensure there's no data leakage in the case of short
    1886             :  * writes.
    1887             :  *
    1888             :  * Caller must ensure we don't call this with other virtqueue
    1889             :  * operations at the same time (except where noted).
    1890             :  *
    1891             :  * Returns NULL if there are no used buffers, or the "data" token
    1892             :  * handed to virtqueue_add_*().
    1893             :  */
    1894       12772 : void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
    1895             :                             void **ctx)
    1896             : {
    1897       12772 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1898             : 
    1899       12772 :         return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
    1900       12772 :                                  virtqueue_get_buf_ctx_split(_vq, len, ctx);
    1901             : }
    1902             : EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
    1903             : 
    1904       11633 : void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
    1905             : {
    1906       11633 :         return virtqueue_get_buf_ctx(_vq, len, NULL);
    1907             : }
    1908             : EXPORT_SYMBOL_GPL(virtqueue_get_buf);
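/*
 * Editorial sketch, not part of the instrumented source: the common
 * completion pattern.  The virtqueue callback switches callbacks off, drains
 * the used ring with virtqueue_get_buf(), and only returns once
 * virtqueue_enable_cb() confirms nothing arrived while callbacks were
 * disabled.  complete_request() is a hypothetical per-driver helper and
 * locking is omitted for brevity.
 */
static void my_dev_done(struct virtqueue *vq)
{
        unsigned int len;
        void *token;

        do {
                virtqueue_disable_cb(vq);
                while ((token = virtqueue_get_buf(vq, &len)) != NULL)
                        complete_request(token, len);   /* hypothetical */
        } while (!virtqueue_enable_cb(vq));     /* false: more work appeared */
}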
    1909             : /**
    1910             :  * virtqueue_disable_cb - disable callbacks
    1911             :  * @_vq: the struct virtqueue we're talking about.
    1912             :  *
     1913             :  * Note that this is not necessarily synchronous, and is therefore unreliable;
     1914             :  * it is only useful as an optimization.
    1915             :  *
    1916             :  * Unlike other operations, this need not be serialized.
    1917             :  */
    1918        3968 : void virtqueue_disable_cb(struct virtqueue *_vq)
    1919             : {
    1920        3968 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1921             : 
    1922        3968 :         if (vq->packed_ring)
    1923           0 :                 virtqueue_disable_cb_packed(_vq);
    1924             :         else
    1925        3968 :                 virtqueue_disable_cb_split(_vq);
    1926        3968 : }
    1927             : EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
    1928             : 
    1929             : /**
    1930             :  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
    1931             :  * @_vq: the struct virtqueue we're talking about.
    1932             :  *
    1933             :  * This re-enables callbacks; it returns current queue state
    1934             :  * in an opaque unsigned value. This value should be later tested by
    1935             :  * virtqueue_poll, to detect a possible race between the driver checking for
    1936             :  * more work, and enabling callbacks.
    1937             :  *
    1938             :  * Caller must ensure we don't call this with other virtqueue
    1939             :  * operations at the same time (except where noted).
    1940             :  */
    1941        3525 : unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
    1942             : {
    1943        3525 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1944             : 
    1945        3525 :         return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
    1946        3525 :                                  virtqueue_enable_cb_prepare_split(_vq);
    1947             : }
    1948             : EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
    1949             : 
    1950             : /**
    1951             :  * virtqueue_poll - query pending used buffers
    1952             :  * @_vq: the struct virtqueue we're talking about.
    1953             :  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
    1954             :  *
    1955             :  * Returns "true" if there are pending used buffers in the queue.
    1956             :  *
    1957             :  * This does not need to be serialized.
    1958             :  */
    1959        3448 : bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
    1960             : {
    1961        3448 :         struct vring_virtqueue *vq = to_vvq(_vq);
    1962             : 
    1963        3448 :         if (unlikely(vq->broken))
    1964             :                 return false;
    1965             : 
    1966        3448 :         virtio_mb(vq->weak_barriers);
    1967        3448 :         return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
    1968        3448 :                                  virtqueue_poll_split(_vq, last_used_idx);
    1969             : }
    1970             : EXPORT_SYMBOL_GPL(virtqueue_poll);
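
Together with virtqueue_enable_cb_prepare() above, this gives the two-step
re-enable pattern: snapshot the queue state, re-enable callbacks, then poll to
close the race window. A hedged sketch (my_try_reenable() is hypothetical):

    /* Editor's sketch: two-step callback re-enable with a race check. */
    #include <linux/virtio.h>

    static bool my_try_reenable(struct virtqueue *vq)
    {
            unsigned int opaque = virtqueue_enable_cb_prepare(vq);

            if (virtqueue_poll(vq, opaque)) {
                    /* Used buffers raced in while re-enabling: turn callbacks
                     * back off and let the caller keep polling. */
                    virtqueue_disable_cb(vq);
                    return false;
            }
            return true;    /* idle; the callback fires on new used buffers */
    }
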
    1971             : 
    1972             : /**
    1973             :  * virtqueue_enable_cb - restart callbacks after disable_cb.
    1974             :  * @_vq: the struct virtqueue we're talking about.
    1975             :  *
    1976             :  * This re-enables callbacks; it returns "false" if there are pending
     1977             :  * buffers in the queue, to detect a possible race between the driver
     1978             :  * checking for more work and enabling callbacks.
    1979             :  *
    1980             :  * Caller must ensure we don't call this with other virtqueue
    1981             :  * operations at the same time (except where noted).
    1982             :  */
    1983        2672 : bool virtqueue_enable_cb(struct virtqueue *_vq)
    1984             : {
    1985        2672 :         unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
    1986             : 
    1987        2672 :         return !virtqueue_poll(_vq, last_used_idx);
    1988             : }
    1989             : EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
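
The combined helper supports the classic interrupt-driven loop: drain with
callbacks off, try to re-enable, and go around again if the re-enable reports
pending buffers. A hedged sketch (my_rx_work() is hypothetical):

    /* Editor's sketch: "false" from virtqueue_enable_cb() means buffers
     * arrived while re-enabling, so drain again. */
    #include <linux/virtio.h>

    static void my_rx_work(struct virtqueue *vq)
    {
            void *token;
            unsigned int len;

            do {
                    virtqueue_disable_cb(vq);
                    while ((token = virtqueue_get_buf(vq, &len)))
                            ;       /* ... handle token ... */
            } while (!virtqueue_enable_cb(vq));
    }
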
    1990             : 
    1991             : /**
    1992             :  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
    1993             :  * @_vq: the struct virtqueue we're talking about.
    1994             :  *
    1995             :  * This re-enables callbacks but hints to the other side to delay
    1996             :  * interrupts until most of the available buffers have been processed;
    1997             :  * it returns "false" if there are many pending buffers in the queue,
     1998             :  * to detect a possible race between the driver checking for more work
     1999             :  * and enabling callbacks.
    2000             :  *
    2001             :  * Caller must ensure we don't call this with other virtqueue
    2002             :  * operations at the same time (except where noted).
    2003             :  */
    2004         447 : bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
    2005             : {
    2006         447 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2007             : 
    2008         447 :         return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
    2009         447 :                                  virtqueue_enable_cb_delayed_split(_vq);
    2010             : }
    2011             : EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
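
The delayed variant suits transmit-style queues, where one interrupt after a
batch of completions is enough. A hedged sketch (my_tx_clean() is
hypothetical):

    /* Editor's sketch: free completed buffers, then ask for a late interrupt. */
    #include <linux/virtio.h>

    static void my_tx_clean(struct virtqueue *vq)
    {
            void *token;
            unsigned int len;

            while ((token = virtqueue_get_buf(vq, &len)))
                    ;       /* ... release the transmitted buffer ... */

            if (!virtqueue_enable_cb_delayed(vq)) {
                    /* Many used buffers are already pending: clean again now
                     * rather than waiting for the delayed interrupt. */
                    while ((token = virtqueue_get_buf(vq, &len)))
                            ;
            }
    }
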
    2012             : 
    2013             : /**
    2014             :  * virtqueue_detach_unused_buf - detach first unused buffer
    2015             :  * @_vq: the struct virtqueue we're talking about.
    2016             :  *
    2017             :  * Returns NULL or the "data" token handed to virtqueue_add_*().
    2018             :  * This is not valid on an active queue; it is useful only for device
    2019             :  * shutdown.
    2020             :  */
    2021           0 : void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
    2022             : {
    2023           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2024             : 
    2025           0 :         return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
    2026           0 :                                  virtqueue_detach_unused_buf_split(_vq);
    2027             : }
    2028             : EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
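
Because this is only valid on an inactive queue, the usual caller is the
device-removal path, after the device has been reset, reclaiming buffers that
were added but never used. A hedged sketch (my_free_unused() is hypothetical
and assumes the tokens were kmalloc'd buffers):

    /* Editor's sketch: teardown-time reclaim of never-used buffers. */
    #include <linux/slab.h>
    #include <linux/virtio.h>

    static void my_free_unused(struct virtqueue *vq)
    {
            void *token;

            /* The device must already be reset, so the queue is inactive. */
            while ((token = virtqueue_detach_unused_buf(vq)))
                    kfree(token);   /* assumes "data" tokens were kmalloc'd */
    }
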
    2029             : 
    2030        4127 : static inline bool more_used(const struct vring_virtqueue *vq)
    2031             : {
    2032        4127 :         return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
    2033             : }
    2034             : 
    2035        4127 : irqreturn_t vring_interrupt(int irq, void *_vq)
    2036             : {
    2037        4127 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2038             : 
    2039        4127 :         if (!more_used(vq)) {
    2040             :                 pr_debug("virtqueue interrupt with no work for %p\n", vq);
    2041             :                 return IRQ_NONE;
    2042             :         }
    2043             : 
    2044        3549 :         if (unlikely(vq->broken))
    2045             :                 return IRQ_HANDLED;
    2046             : 
    2047        3549 :         pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
    2048        3549 :         if (vq->vq.callback)
    2049        3548 :                 vq->vq.callback(&vq->vq);
    2050             : 
    2051             :         return IRQ_HANDLED;
    2052             : }
    2053             : EXPORT_SYMBOL_GPL(vring_interrupt);
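
vring_interrupt() is the handler a transport registers with the interrupt
layer: it filters interrupts that carry no work via more_used() and otherwise
dispatches to the virtqueue's callback. A hedged sketch of the wiring
(my_setup_irq() and the irq number are hypothetical; transports with a
per-queue vector do essentially this):

    /* Editor's sketch: register the ring's handler for a per-queue IRQ. */
    #include <linux/interrupt.h>
    #include <linux/virtio_ring.h>

    static int my_setup_irq(int irq, struct virtqueue *vq)
    {
            return request_irq(irq, vring_interrupt, IRQF_SHARED,
                               "my-virtio-vq", vq);
    }
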
    2054             : 
    2055             : /* Only available for split ring */
    2056           4 : struct virtqueue *__vring_new_virtqueue(unsigned int index,
    2057             :                                         struct vring vring,
    2058             :                                         struct virtio_device *vdev,
    2059             :                                         bool weak_barriers,
    2060             :                                         bool context,
    2061             :                                         bool (*notify)(struct virtqueue *),
    2062             :                                         void (*callback)(struct virtqueue *),
    2063             :                                         const char *name)
    2064             : {
    2065           4 :         unsigned int i;
    2066           4 :         struct vring_virtqueue *vq;
    2067             : 
    2068           4 :         if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
    2069             :                 return NULL;
    2070             : 
    2071           4 :         vq = kmalloc(sizeof(*vq), GFP_KERNEL);
    2072           4 :         if (!vq)
    2073             :                 return NULL;
    2074             : 
    2075           4 :         vq->packed_ring = false;
    2076           4 :         vq->vq.callback = callback;
    2077           4 :         vq->vq.vdev = vdev;
    2078           4 :         vq->vq.name = name;
    2079           4 :         vq->vq.num_free = vring.num;
    2080           4 :         vq->vq.index = index;
    2081           4 :         vq->we_own_ring = false;
    2082           4 :         vq->notify = notify;
    2083           4 :         vq->weak_barriers = weak_barriers;
    2084           4 :         vq->broken = false;
    2085           4 :         vq->last_used_idx = 0;
    2086           4 :         vq->num_added = 0;
    2087           4 :         vq->use_dma_api = vring_use_dma_api(vdev);
    2088             : #ifdef DEBUG
    2089             :         vq->in_use = false;
    2090             :         vq->last_add_time_valid = false;
    2091             : #endif
    2092             : 
    2093           4 :         vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
    2094             :                 !context;
    2095           4 :         vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
    2096             : 
    2097           4 :         if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
    2098           0 :                 vq->weak_barriers = false;
    2099             : 
    2100           4 :         vq->split.queue_dma_addr = 0;
    2101           4 :         vq->split.queue_size_in_bytes = 0;
    2102             : 
    2103           4 :         vq->split.vring = vring;
    2104           4 :         vq->split.avail_flags_shadow = 0;
    2105           4 :         vq->split.avail_idx_shadow = 0;
    2106             : 
    2107             :         /* No callback?  Tell other side not to bother us. */
    2108           4 :         if (!callback) {
    2109           1 :                 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
    2110           1 :                 if (!vq->event)
    2111           0 :                         vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
    2112             :                                         vq->split.avail_flags_shadow);
    2113             :         }
    2114             : 
    2115           4 :         vq->split.desc_state = kmalloc_array(vring.num,
    2116             :                         sizeof(struct vring_desc_state_split), GFP_KERNEL);
    2117           4 :         if (!vq->split.desc_state) {
    2118           0 :                 kfree(vq);
    2119           0 :                 return NULL;
    2120             :         }
    2121             : 
    2122             :         /* Put everything in free lists. */
    2123           4 :         vq->free_head = 0;
    2124        4096 :         for (i = 0; i < vring.num-1; i++)
    2125        4092 :                 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
    2126           4 :         memset(vq->split.desc_state, 0, vring.num *
    2127             :                         sizeof(struct vring_desc_state_split));
    2128             : 
    2129           4 :         list_add_tail(&vq->vq.list, &vdev->vqs);
    2130           4 :         return &vq->vq;
    2131             : }
    2132             : EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
    2133             : 
    2134           4 : struct virtqueue *vring_create_virtqueue(
    2135             :         unsigned int index,
    2136             :         unsigned int num,
    2137             :         unsigned int vring_align,
    2138             :         struct virtio_device *vdev,
    2139             :         bool weak_barriers,
    2140             :         bool may_reduce_num,
    2141             :         bool context,
    2142             :         bool (*notify)(struct virtqueue *),
    2143             :         void (*callback)(struct virtqueue *),
    2144             :         const char *name)
    2145             : {
    2146             : 
    2147           4 :         if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
    2148           0 :                 return vring_create_virtqueue_packed(index, num, vring_align,
    2149             :                                 vdev, weak_barriers, may_reduce_num,
    2150             :                                 context, notify, callback, name);
    2151             : 
    2152           4 :         return vring_create_virtqueue_split(index, num, vring_align,
    2153             :                         vdev, weak_barriers, may_reduce_num,
    2154             :                         context, notify, callback, name);
    2155             : }
    2156             : EXPORT_SYMBOL_GPL(vring_create_virtqueue);
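
A transport typically calls this once per queue, passing its doorbell hook as
@notify and the driver's completion hook as @callback; the ring memory is
allocated internally (packed or split, depending on the negotiated features).
A hedged sketch (my_notify(), my_callback(), my_create_vq() and the numeric
parameters are illustrative, not taken from this file):

    /* Editor's sketch: transport-side queue creation. */
    #include <linux/virtio.h>
    #include <linux/virtio_ring.h>

    static bool my_notify(struct virtqueue *vq)
    {
            /* e.g. write the queue index to a doorbell register */
            return true;
    }

    static void my_callback(struct virtqueue *vq)
    {
            /* e.g. schedule the driver's used-buffer processing */
    }

    static struct virtqueue *my_create_vq(struct virtio_device *vdev)
    {
            return vring_create_virtqueue(0 /* index */, 256 /* num */,
                                          PAGE_SIZE /* vring_align */, vdev,
                                          true /* weak_barriers */,
                                          true /* may_reduce_num */,
                                          false /* context */,
                                          my_notify, my_callback, "my-vq");
    }
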
    2157             : 
    2158             : /* Only available for split ring */
    2159           0 : struct virtqueue *vring_new_virtqueue(unsigned int index,
    2160             :                                       unsigned int num,
    2161             :                                       unsigned int vring_align,
    2162             :                                       struct virtio_device *vdev,
    2163             :                                       bool weak_barriers,
    2164             :                                       bool context,
    2165             :                                       void *pages,
    2166             :                                       bool (*notify)(struct virtqueue *vq),
    2167             :                                       void (*callback)(struct virtqueue *vq),
    2168             :                                       const char *name)
    2169             : {
    2170           0 :         struct vring vring;
    2171             : 
    2172           0 :         if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
    2173             :                 return NULL;
    2174             : 
    2175           0 :         vring_init(&vring, num, pages, vring_align);
    2176           0 :         return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
    2177             :                                      notify, callback, name);
    2178             : }
    2179             : EXPORT_SYMBOL_GPL(vring_new_virtqueue);
    2180             : 
    2181           0 : void vring_del_virtqueue(struct virtqueue *_vq)
    2182             : {
    2183           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2184             : 
    2185           0 :         if (vq->we_own_ring) {
    2186           0 :                 if (vq->packed_ring) {
    2187           0 :                         vring_free_queue(vq->vq.vdev,
    2188             :                                          vq->packed.ring_size_in_bytes,
    2189           0 :                                          vq->packed.vring.desc,
    2190             :                                          vq->packed.ring_dma_addr);
    2191             : 
    2192           0 :                         vring_free_queue(vq->vq.vdev,
    2193             :                                          vq->packed.event_size_in_bytes,
    2194           0 :                                          vq->packed.vring.driver,
    2195             :                                          vq->packed.driver_event_dma_addr);
    2196             : 
    2197           0 :                         vring_free_queue(vq->vq.vdev,
    2198             :                                          vq->packed.event_size_in_bytes,
    2199           0 :                                          vq->packed.vring.device,
    2200             :                                          vq->packed.device_event_dma_addr);
    2201             : 
    2202           0 :                         kfree(vq->packed.desc_state);
    2203           0 :                         kfree(vq->packed.desc_extra);
    2204             :                 } else {
    2205           0 :                         vring_free_queue(vq->vq.vdev,
    2206             :                                          vq->split.queue_size_in_bytes,
    2207           0 :                                          vq->split.vring.desc,
    2208             :                                          vq->split.queue_dma_addr);
    2209             :                 }
    2210             :         }
    2211           0 :         if (!vq->packed_ring)
    2212           0 :                 kfree(vq->split.desc_state);
    2213           0 :         list_del(&_vq->list);
    2214           0 :         kfree(vq);
    2215           0 : }
    2216             : EXPORT_SYMBOL_GPL(vring_del_virtqueue);
    2217             : 
    2218             : /* Manipulates transport-specific feature bits. */
    2219           2 : void vring_transport_features(struct virtio_device *vdev)
    2220             : {
    2221           2 :         unsigned int i;
    2222             : 
    2223          22 :         for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
    2224          20 :                 switch (i) {
    2225             :                 case VIRTIO_RING_F_INDIRECT_DESC:
    2226             :                         break;
    2227             :                 case VIRTIO_RING_F_EVENT_IDX:
    2228             :                         break;
    2229             :                 case VIRTIO_F_VERSION_1:
    2230             :                         break;
    2231             :                 case VIRTIO_F_ACCESS_PLATFORM:
    2232             :                         break;
    2233             :                 case VIRTIO_F_RING_PACKED:
    2234             :                         break;
    2235             :                 case VIRTIO_F_ORDER_PLATFORM:
    2236             :                         break;
    2237           8 :                 default:
    2238             :                         /* We don't understand this bit. */
    2239           8 :                         __virtio_clear_bit(vdev, i);
    2240             :                 }
    2241             :         }
    2242           2 : }
    2243             : EXPORT_SYMBOL_GPL(vring_transport_features);
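
Transports call this from their finalize_features hook so that any
transport-level feature bit not listed in the switch above is cleared before
the feature set is latched. A hedged sketch (my_finalize_features() is
hypothetical):

    /* Editor's sketch: a transport's finalize_features hook. */
    #include <linux/virtio.h>
    #include <linux/virtio_ring.h>

    static int my_finalize_features(struct virtio_device *vdev)
    {
            /* Drop ring/transport bits this file does not understand. */
            vring_transport_features(vdev);

            /* ... then write vdev->features out to the device ... */
            return 0;
    }
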
    2244             : 
    2245             : /**
    2246             :  * virtqueue_get_vring_size - return the size of the virtqueue's vring
    2247             :  * @_vq: the struct virtqueue containing the vring of interest.
    2248             :  *
    2249             :  * Returns the size of the vring.  This is mainly used for boasting to
    2250             :  * userspace.  Unlike other operations, this need not be serialized.
    2251             :  */
    2252         422 : unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
    2253             : {
    2254             : 
    2255         422 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2256             : 
    2257         422 :         return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
    2258             : }
    2259             : EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
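
Beyond reporting, the ring size is a convenient bound for per-entry driver
bookkeeping. A hedged sketch (my_alloc_token_table() is hypothetical):

    /* Editor's sketch: size a per-entry table from the negotiated ring size. */
    #include <linux/slab.h>
    #include <linux/virtio.h>

    static void **my_alloc_token_table(struct virtqueue *vq)
    {
            unsigned int num = virtqueue_get_vring_size(vq);

            return kcalloc(num, sizeof(void *), GFP_KERNEL);
    }
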
    2260             : 
    2261        6512 : bool virtqueue_is_broken(struct virtqueue *_vq)
    2262             : {
    2263        6512 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2264             : 
    2265        6512 :         return vq->broken;
    2266             : }
    2267             : EXPORT_SYMBOL_GPL(virtqueue_is_broken);
    2268             : 
    2269             : /*
    2270             :  * This should prevent the device from being used, allowing drivers to
    2271             :  * recover.  You may need to grab appropriate locks to flush.
    2272             :  */
    2273           0 : void virtio_break_device(struct virtio_device *dev)
    2274             : {
    2275           0 :         struct virtqueue *_vq;
    2276             : 
    2277           0 :         list_for_each_entry(_vq, &dev->vqs, list) {
    2278           0 :                 struct vring_virtqueue *vq = to_vvq(_vq);
    2279           0 :                 vq->broken = true;
    2280             :         }
    2281           0 : }
    2282             : EXPORT_SYMBOL_GPL(virtio_break_device);
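
A hedged sketch of the intended use: on a fatal error, mark every queue broken
so concurrent kick/poll/interrupt paths bail out, then let the normal reset
path recover (my_fatal_error() is hypothetical):

    /* Editor's sketch: fail fast before scheduling recovery. */
    #include <linux/virtio.h>

    static void my_fatal_error(struct virtio_device *vdev)
    {
            virtio_break_device(vdev);
            /* From here, virtqueue_is_broken() is true for every vq and
             * vring_interrupt()/virtqueue_poll() return early. */
            /* ... schedule a device reset / driver recovery ... */
    }
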
    2283             : 
    2284           4 : dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
    2285             : {
    2286           4 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2287             : 
    2288           4 :         BUG_ON(!vq->we_own_ring);
    2289             : 
    2290           4 :         if (vq->packed_ring)
    2291           0 :                 return vq->packed.ring_dma_addr;
    2292             : 
    2293           4 :         return vq->split.queue_dma_addr;
    2294             : }
    2295             : EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
    2296             : 
    2297           0 : dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
    2298             : {
    2299           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2300             : 
    2301           0 :         BUG_ON(!vq->we_own_ring);
    2302             : 
    2303           0 :         if (vq->packed_ring)
    2304           0 :                 return vq->packed.driver_event_dma_addr;
    2305             : 
    2306           0 :         return vq->split.queue_dma_addr +
    2307           0 :                 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
    2308             : }
    2309             : EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
    2310             : 
    2311           0 : dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
    2312             : {
    2313           0 :         struct vring_virtqueue *vq = to_vvq(_vq);
    2314             : 
    2315           0 :         BUG_ON(!vq->we_own_ring);
    2316             : 
    2317           0 :         if (vq->packed_ring)
    2318           0 :                 return vq->packed.device_event_dma_addr;
    2319             : 
    2320           0 :         return vq->split.queue_dma_addr +
    2321           0 :                 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
    2322             : }
    2323             : EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
    2324             : 
    2325             : /* Only available for split ring */
    2326           0 : const struct vring *virtqueue_get_vring(struct virtqueue *vq)
    2327             : {
    2328           0 :         return &to_vvq(vq)->split.vring;
    2329             : }
    2330             : EXPORT_SYMBOL_GPL(virtqueue_get_vring);
    2331             : 
    2332             : MODULE_LICENSE("GPL");

Generated by: LCOV version 1.14