LCOV - code coverage report
Current view: top level - drivers/scsi - virtio_scsi.c (source / functions)
                                             Hit    Total    Coverage
Test: landlock.info              Lines:        9      447       2.0 %
Date: 2021-04-22 12:43:58        Functions:    1       34       2.9 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Virtio SCSI HBA driver
       4             :  *
       5             :  * Copyright IBM Corp. 2010
       6             :  * Copyright Red Hat, Inc. 2011
       7             :  *
       8             :  * Authors:
       9             :  *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
      10             :  *  Paolo Bonzini   <pbonzini@redhat.com>
      11             :  */
      12             : 
      13             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      14             : 
      15             : #include <linux/module.h>
      16             : #include <linux/slab.h>
      17             : #include <linux/mempool.h>
      18             : #include <linux/interrupt.h>
      19             : #include <linux/virtio.h>
      20             : #include <linux/virtio_ids.h>
      21             : #include <linux/virtio_config.h>
      22             : #include <linux/virtio_scsi.h>
      23             : #include <linux/cpu.h>
      24             : #include <linux/blkdev.h>
      25             : #include <scsi/scsi_host.h>
      26             : #include <scsi/scsi_device.h>
      27             : #include <scsi/scsi_cmnd.h>
      28             : #include <scsi/scsi_tcq.h>
      29             : #include <scsi/scsi_devinfo.h>
      30             : #include <linux/seqlock.h>
      31             : #include <linux/blk-mq-virtio.h>
      32             : 
      33             : #include "sd.h"
      34             : 
      35             : #define VIRTIO_SCSI_MEMPOOL_SZ 64
      36             : #define VIRTIO_SCSI_EVENT_LEN 8
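                     : /*
                     :  * Virtqueues 0 and 1 are the control and event queues; request queues
                     :  * follow, so the first request virtqueue sits at this index.
                     :  */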
      37             : #define VIRTIO_SCSI_VQ_BASE 2
      38             : 
      39             : /* Command queue element */
      40             : struct virtio_scsi_cmd {
      41             :         struct scsi_cmnd *sc;
      42             :         struct completion *comp;
      43             :         union {
      44             :                 struct virtio_scsi_cmd_req       cmd;
      45             :                 struct virtio_scsi_cmd_req_pi    cmd_pi;
      46             :                 struct virtio_scsi_ctrl_tmf_req  tmf;
      47             :                 struct virtio_scsi_ctrl_an_req   an;
      48             :         } req;
      49             :         union {
      50             :                 struct virtio_scsi_cmd_resp      cmd;
      51             :                 struct virtio_scsi_ctrl_tmf_resp tmf;
      52             :                 struct virtio_scsi_ctrl_an_resp  an;
      53             :                 struct virtio_scsi_event         evt;
      54             :         } resp;
      55             : } ____cacheline_aligned_in_smp;
      56             : 
      57             : struct virtio_scsi_event_node {
      58             :         struct virtio_scsi *vscsi;
      59             :         struct virtio_scsi_event event;
      60             :         struct work_struct work;
      61             : };
      62             : 
      63             : struct virtio_scsi_vq {
      64             :         /* Protects vq */
      65             :         spinlock_t vq_lock;
      66             : 
      67             :         struct virtqueue *vq;
      68             : };
      69             : 
      70             : /* Driver instance state */
      71             : struct virtio_scsi {
      72             :         struct virtio_device *vdev;
      73             : 
      74             :         /* Get some buffers ready for event vq */
      75             :         struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
      76             : 
      77             :         u32 num_queues;
      78             : 
      79             :         struct hlist_node node;
      80             : 
      81             :         /* Protected by event_vq lock */
      82             :         bool stop_events;
      83             : 
      84             :         struct virtio_scsi_vq ctrl_vq;
      85             :         struct virtio_scsi_vq event_vq;
      86             :         struct virtio_scsi_vq req_vqs[];
      87             : };
      88             : 
      89             : static struct kmem_cache *virtscsi_cmd_cache;
      90             : static mempool_t *virtscsi_cmd_pool;
      91             : 
      92           0 : static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
      93             : {
      94           0 :         return vdev->priv;
      95             : }
      96             : 
      97           0 : static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
      98             : {
      99           0 :         if (resid)
     100           0 :                 scsi_set_resid(sc, resid);
     101             : }
     102             : 
     103             : /*
     104             :  * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
     105             :  *
     106             :  * Called with vq_lock held.
     107             :  */
     108           0 : static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
     109             : {
     110           0 :         struct virtio_scsi_cmd *cmd = buf;
     111           0 :         struct scsi_cmnd *sc = cmd->sc;
     112           0 :         struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
     113             : 
     114           0 :         dev_dbg(&sc->device->sdev_gendev,
     115             :                 "cmd %p response %u status %#02x sense_len %u\n",
     116             :                 sc, resp->response, resp->status, resp->sense_len);
     117             : 
     118           0 :         sc->result = resp->status;
     119           0 :         virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
     120           0 :         switch (resp->response) {
     121             :         case VIRTIO_SCSI_S_OK:
     122           0 :                 set_host_byte(sc, DID_OK);
     123             :                 break;
     124             :         case VIRTIO_SCSI_S_OVERRUN:
     125           0 :                 set_host_byte(sc, DID_ERROR);
     126             :                 break;
     127             :         case VIRTIO_SCSI_S_ABORTED:
     128           0 :                 set_host_byte(sc, DID_ABORT);
     129             :                 break;
     130             :         case VIRTIO_SCSI_S_BAD_TARGET:
     131           0 :                 set_host_byte(sc, DID_BAD_TARGET);
     132             :                 break;
     133             :         case VIRTIO_SCSI_S_RESET:
     134           0 :                 set_host_byte(sc, DID_RESET);
     135             :                 break;
     136             :         case VIRTIO_SCSI_S_BUSY:
     137           0 :                 set_host_byte(sc, DID_BUS_BUSY);
     138             :                 break;
     139             :         case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
     140           0 :                 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
     141             :                 break;
     142             :         case VIRTIO_SCSI_S_TARGET_FAILURE:
     143           0 :                 set_host_byte(sc, DID_TARGET_FAILURE);
     144             :                 break;
     145             :         case VIRTIO_SCSI_S_NEXUS_FAILURE:
     146           0 :                 set_host_byte(sc, DID_NEXUS_FAILURE);
     147             :                 break;
     148           0 :         default:
     149           0 :                 scmd_printk(KERN_WARNING, sc, "Unknown response %d",
     150             :                             resp->response);
     151           0 :                 fallthrough;
     152           0 :         case VIRTIO_SCSI_S_FAILURE:
     153           0 :                 set_host_byte(sc, DID_ERROR);
     154             :                 break;
     155             :         }
     156             : 
     157           0 :         WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
     158             :                 VIRTIO_SCSI_SENSE_SIZE);
     159           0 :         if (sc->sense_buffer) {
     160           0 :                 memcpy(sc->sense_buffer, resp->sense,
     161           0 :                        min_t(u32,
     162             :                              virtio32_to_cpu(vscsi->vdev, resp->sense_len),
     163             :                              VIRTIO_SCSI_SENSE_SIZE));
     164           0 :                 if (resp->sense_len)
     165           0 :                         set_driver_byte(sc, DRIVER_SENSE);
     166             :         }
     167             : 
     168           0 :         sc->scsi_done(sc);
     169           0 : }
     170             : 
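                     : /*
                     :  * Drain a virtqueue: callbacks stay disabled while completed buffers are
                     :  * consumed, and if virtqueue_enable_cb() reports that more buffers arrived
                     :  * in the meantime the loop runs again, so no completion is missed.
                     :  */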
     171           0 : static void virtscsi_vq_done(struct virtio_scsi *vscsi,
     172             :                              struct virtio_scsi_vq *virtscsi_vq,
     173             :                              void (*fn)(struct virtio_scsi *vscsi, void *buf))
     174             : {
     175           0 :         void *buf;
     176           0 :         unsigned int len;
     177           0 :         unsigned long flags;
     178           0 :         struct virtqueue *vq = virtscsi_vq->vq;
     179             : 
     180           0 :         spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
     181           0 :         do {
     182           0 :                 virtqueue_disable_cb(vq);
     183           0 :                 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
     184           0 :                         fn(vscsi, buf);
     185             : 
     186           0 :                 if (unlikely(virtqueue_is_broken(vq)))
     187             :                         break;
     188           0 :         } while (!virtqueue_enable_cb(vq));
     189           0 :         spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
     190           0 : }
     191             : 
     192           0 : static void virtscsi_req_done(struct virtqueue *vq)
     193             : {
     194           0 :         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
     195           0 :         struct virtio_scsi *vscsi = shost_priv(sh);
     196           0 :         int index = vq->index - VIRTIO_SCSI_VQ_BASE;
     197           0 :         struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
     198             : 
     199           0 :         virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
      200           0 : }
     201             : 
     202           0 : static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
     203             : {
     204           0 :         int i, num_vqs;
     205             : 
     206           0 :         num_vqs = vscsi->num_queues;
     207           0 :         for (i = 0; i < num_vqs; i++)
     208           0 :                 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
     209             :                                  virtscsi_complete_cmd);
     210           0 : }
     211             : 
     212           0 : static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
     213             : {
     214           0 :         struct virtio_scsi_cmd *cmd = buf;
     215             : 
     216           0 :         if (cmd->comp)
     217           0 :                 complete(cmd->comp);
     218           0 : }
     219             : 
     220           0 : static void virtscsi_ctrl_done(struct virtqueue *vq)
     221             : {
     222           0 :         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
     223           0 :         struct virtio_scsi *vscsi = shost_priv(sh);
     224             : 
     225           0 :         virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
      226           0 : }
     227             : 
     228             : static void virtscsi_handle_event(struct work_struct *work);
     229             : 
     230           0 : static int virtscsi_kick_event(struct virtio_scsi *vscsi,
     231             :                                struct virtio_scsi_event_node *event_node)
     232             : {
     233           0 :         int err;
     234           0 :         struct scatterlist sg;
     235           0 :         unsigned long flags;
     236             : 
     237           0 :         INIT_WORK(&event_node->work, virtscsi_handle_event);
     238           0 :         sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
     239             : 
     240           0 :         spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
     241             : 
     242           0 :         err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
     243             :                                   GFP_ATOMIC);
     244           0 :         if (!err)
     245           0 :                 virtqueue_kick(vscsi->event_vq.vq);
     246             : 
     247           0 :         spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
     248             : 
     249           0 :         return err;
     250             : }
     251             : 
     252           0 : static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
     253             : {
     254           0 :         int i;
     255             : 
     256           0 :         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
     257           0 :                 vscsi->event_list[i].vscsi = vscsi;
     258           0 :                 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
     259             :         }
     260             : 
     261           0 :         return 0;
     262             : }
     263             : 
     264           0 : static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
     265             : {
     266           0 :         int i;
     267             : 
     268             :         /* Stop scheduling work before calling cancel_work_sync.  */
     269           0 :         spin_lock_irq(&vscsi->event_vq.vq_lock);
     270           0 :         vscsi->stop_events = true;
     271           0 :         spin_unlock_irq(&vscsi->event_vq.vq_lock);
     272             : 
     273           0 :         for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
     274           0 :                 cancel_work_sync(&vscsi->event_list[i].work);
     275           0 : }
     276             : 
     277           0 : static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
     278             :                                             struct virtio_scsi_event *event)
     279             : {
     280           0 :         struct scsi_device *sdev;
     281           0 :         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
     282           0 :         unsigned int target = event->lun[1];
     283           0 :         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
     284             : 
     285           0 :         switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
     286           0 :         case VIRTIO_SCSI_EVT_RESET_RESCAN:
     287           0 :                 if (lun == 0) {
     288           0 :                         scsi_scan_target(&shost->shost_gendev, 0, target,
     289             :                                          SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
     290             :                 } else {
     291           0 :                         scsi_add_device(shost, 0, target, lun);
     292             :                 }
     293             :                 break;
     294           0 :         case VIRTIO_SCSI_EVT_RESET_REMOVED:
     295           0 :                 sdev = scsi_device_lookup(shost, 0, target, lun);
     296           0 :                 if (sdev) {
     297           0 :                         scsi_remove_device(sdev);
     298           0 :                         scsi_device_put(sdev);
     299             :                 } else {
     300           0 :                         pr_err("SCSI device %d 0 %d %d not found\n",
     301             :                                 shost->host_no, target, lun);
     302             :                 }
     303             :                 break;
     304           0 :         default:
      305           0 :         pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
     306             :         }
     307           0 : }
     308             : 
     309           0 : static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
     310             :                                          struct virtio_scsi_event *event)
     311             : {
     312           0 :         struct scsi_device *sdev;
     313           0 :         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
     314           0 :         unsigned int target = event->lun[1];
     315           0 :         unsigned int lun = (event->lun[2] << 8) | event->lun[3];
     316           0 :         u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
     317           0 :         u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
     318             : 
     319           0 :         sdev = scsi_device_lookup(shost, 0, target, lun);
     320           0 :         if (!sdev) {
     321           0 :                 pr_err("SCSI device %d 0 %d %d not found\n",
     322             :                         shost->host_no, target, lun);
     323           0 :                 return;
     324             :         }
     325             : 
     326             :         /* Handle "Parameters changed", "Mode parameters changed", and
     327             :            "Capacity data has changed".  */
     328           0 :         if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
     329           0 :                 scsi_rescan_device(&sdev->sdev_gendev);
     330             : 
     331           0 :         scsi_device_put(sdev);
     332             : }
     333             : 
     334           0 : static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
     335             : {
     336           0 :         struct scsi_device *sdev;
     337           0 :         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
     338           0 :         unsigned char scsi_cmd[MAX_COMMAND_SIZE];
     339           0 :         int result, inquiry_len, inq_result_len = 256;
     340           0 :         char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
     341             : 
     342           0 :         shost_for_each_device(sdev, shost) {
     343           0 :                 inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
     344             : 
     345           0 :                 memset(scsi_cmd, 0, sizeof(scsi_cmd));
     346           0 :                 scsi_cmd[0] = INQUIRY;
     347           0 :                 scsi_cmd[4] = (unsigned char) inquiry_len;
     348             : 
     349           0 :                 memset(inq_result, 0, inq_result_len);
     350             : 
     351           0 :                 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
     352             :                                           inq_result, inquiry_len, NULL,
     353             :                                           SD_TIMEOUT, SD_MAX_RETRIES, NULL);
     354             : 
     355           0 :                 if (result == 0 && inq_result[0] >> 5) {
     356             :                         /* PQ indicates the LUN is not attached */
     357           0 :                         scsi_remove_device(sdev);
     358           0 :                 } else if (host_byte(result) == DID_BAD_TARGET) {
     359             :                         /*
     360             :                          * If all LUNs of a virtio-scsi device are unplugged
     361             :                          * it will respond with BAD TARGET on any INQUIRY
     362             :                          * command.
     363             :                          * Remove the device in this case as well.
     364             :                          */
     365           0 :                         scsi_remove_device(sdev);
     366             :                 }
     367             :         }
     368             : 
     369           0 :         kfree(inq_result);
     370           0 : }
     371             : 
     372           0 : static void virtscsi_handle_event(struct work_struct *work)
     373             : {
     374           0 :         struct virtio_scsi_event_node *event_node =
     375           0 :                 container_of(work, struct virtio_scsi_event_node, work);
     376           0 :         struct virtio_scsi *vscsi = event_node->vscsi;
     377           0 :         struct virtio_scsi_event *event = &event_node->event;
     378             : 
     379           0 :         if (event->event &
     380           0 :             cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
     381           0 :                 event->event &= ~cpu_to_virtio32(vscsi->vdev,
     382             :                                                    VIRTIO_SCSI_T_EVENTS_MISSED);
     383           0 :                 virtscsi_rescan_hotunplug(vscsi);
     384           0 :                 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
     385             :         }
     386             : 
     387           0 :         switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
     388             :         case VIRTIO_SCSI_T_NO_EVENT:
     389             :                 break;
     390           0 :         case VIRTIO_SCSI_T_TRANSPORT_RESET:
     391           0 :                 virtscsi_handle_transport_reset(vscsi, event);
     392           0 :                 break;
     393           0 :         case VIRTIO_SCSI_T_PARAM_CHANGE:
     394           0 :                 virtscsi_handle_param_change(vscsi, event);
     395           0 :                 break;
     396           0 :         default:
      397           0 :         pr_err("Unsupported virtio scsi event %x\n", event->event);
     398             :         }
     399           0 :         virtscsi_kick_event(vscsi, event_node);
     400           0 : }
     401             : 
     402           0 : static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
     403             : {
     404           0 :         struct virtio_scsi_event_node *event_node = buf;
     405             : 
     406           0 :         if (!vscsi->stop_events)
     407           0 :                 queue_work(system_freezable_wq, &event_node->work);
     408           0 : }
     409             : 
     410           0 : static void virtscsi_event_done(struct virtqueue *vq)
     411             : {
     412           0 :         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
     413           0 :         struct virtio_scsi *vscsi = shost_priv(sh);
     414             : 
     415           0 :         virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
      416           0 : }
     417             : 
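                     : /*
                     :  * Lay out the scatterlists for one request.  At most six entries are used:
                     :  * request header, optional WRITE protection data and data-out payload
                     :  * (device-readable), then response header, optional READ protection data
                     :  * and data-in payload (device-writable) -- hence sgs[6].
                     :  */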
     418           0 : static int __virtscsi_add_cmd(struct virtqueue *vq,
     419             :                             struct virtio_scsi_cmd *cmd,
     420             :                             size_t req_size, size_t resp_size)
     421             : {
     422           0 :         struct scsi_cmnd *sc = cmd->sc;
     423           0 :         struct scatterlist *sgs[6], req, resp;
     424           0 :         struct sg_table *out, *in;
     425           0 :         unsigned out_num = 0, in_num = 0;
     426             : 
     427           0 :         out = in = NULL;
     428             : 
     429           0 :         if (sc && sc->sc_data_direction != DMA_NONE) {
     430           0 :                 if (sc->sc_data_direction != DMA_FROM_DEVICE)
     431           0 :                         out = &sc->sdb.table;
     432           0 :                 if (sc->sc_data_direction != DMA_TO_DEVICE)
     433           0 :                         in = &sc->sdb.table;
     434             :         }
     435             : 
     436             :         /* Request header.  */
     437           0 :         sg_init_one(&req, &cmd->req, req_size);
     438           0 :         sgs[out_num++] = &req;
     439             : 
     440             :         /* Data-out buffer.  */
     441           0 :         if (out) {
     442             :                 /* Place WRITE protection SGLs before Data OUT payload */
     443           0 :                 if (scsi_prot_sg_count(sc))
     444           0 :                         sgs[out_num++] = scsi_prot_sglist(sc);
     445           0 :                 sgs[out_num++] = out->sgl;
     446             :         }
     447             : 
     448             :         /* Response header.  */
     449           0 :         sg_init_one(&resp, &cmd->resp, resp_size);
     450           0 :         sgs[out_num + in_num++] = &resp;
     451             : 
     452             :         /* Data-in buffer */
     453           0 :         if (in) {
     454             :                 /* Place READ protection SGLs before Data IN payload */
     455           0 :                 if (scsi_prot_sg_count(sc))
     456           0 :                         sgs[out_num + in_num++] = scsi_prot_sglist(sc);
     457           0 :                 sgs[out_num + in_num++] = in->sgl;
     458             :         }
     459             : 
     460           0 :         return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
     461             : }
     462             : 
     463           0 : static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
     464             : {
     465           0 :         bool needs_kick;
     466           0 :         unsigned long flags;
     467             : 
     468           0 :         spin_lock_irqsave(&vq->vq_lock, flags);
     469           0 :         needs_kick = virtqueue_kick_prepare(vq->vq);
     470           0 :         spin_unlock_irqrestore(&vq->vq_lock, flags);
     471             : 
     472           0 :         if (needs_kick)
     473           0 :                 virtqueue_notify(vq->vq);
     474           0 : }
     475             : 
     476             : /**
     477             :  * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
     478             :  * @vq          : the struct virtqueue we're talking about
     479             :  * @cmd         : command structure
     480             :  * @req_size    : size of the request buffer
     481             :  * @resp_size   : size of the response buffer
     482             :  * @kick        : whether to kick the virtqueue immediately
     483             :  */
     484           0 : static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
     485             :                              struct virtio_scsi_cmd *cmd,
     486             :                              size_t req_size, size_t resp_size,
     487             :                              bool kick)
     488             : {
     489           0 :         unsigned long flags;
     490           0 :         int err;
     491           0 :         bool needs_kick = false;
     492             : 
     493           0 :         spin_lock_irqsave(&vq->vq_lock, flags);
     494           0 :         err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
     495           0 :         if (!err && kick)
     496           0 :                 needs_kick = virtqueue_kick_prepare(vq->vq);
     497             : 
     498           0 :         spin_unlock_irqrestore(&vq->vq_lock, flags);
     499             : 
     500           0 :         if (needs_kick)
     501           0 :                 virtqueue_notify(vq->vq);
     502           0 :         return err;
     503             : }
     504             : 
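                     : /*
                     :  * Encode the address expected by virtio-scsi: lun[0] is always 1, lun[1]
                     :  * is the target id, and lun[2]/lun[3] carry the LUN in the flat (format 1)
                     :  * single-level encoding, i.e. 0x4000 | lun in big-endian order.  For
                     :  * example, target 5 with LUN 300 (0x12c) encodes as { 1, 5, 0x41, 0x2c }.
                     :  */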
     505           0 : static void virtio_scsi_init_hdr(struct virtio_device *vdev,
     506             :                                  struct virtio_scsi_cmd_req *cmd,
     507             :                                  struct scsi_cmnd *sc)
     508             : {
     509           0 :         cmd->lun[0] = 1;
     510           0 :         cmd->lun[1] = sc->device->id;
     511           0 :         cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
     512           0 :         cmd->lun[3] = sc->device->lun & 0xff;
     513           0 :         cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
     514           0 :         cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
     515           0 :         cmd->prio = 0;
     516           0 :         cmd->crn = 0;
     517           0 : }
     518             : 
     519             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     520             : static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
     521             :                                     struct virtio_scsi_cmd_req_pi *cmd_pi,
     522             :                                     struct scsi_cmnd *sc)
     523             : {
     524             :         struct request *rq = sc->request;
     525             :         struct blk_integrity *bi;
     526             : 
     527             :         virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
     528             : 
     529             :         if (!rq || !scsi_prot_sg_count(sc))
     530             :                 return;
     531             : 
     532             :         bi = blk_get_integrity(rq->rq_disk);
     533             : 
     534             :         if (sc->sc_data_direction == DMA_TO_DEVICE)
     535             :                 cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
     536             :                                                       bio_integrity_bytes(bi,
     537             :                                                         blk_rq_sectors(rq)));
     538             :         else if (sc->sc_data_direction == DMA_FROM_DEVICE)
     539             :                 cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
     540             :                                                      bio_integrity_bytes(bi,
     541             :                                                         blk_rq_sectors(rq)));
     542             : }
     543             : #endif
     544             : 
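                     : /*
                     :  * Each blk-mq hardware queue maps one-to-one onto a request virtqueue, so
                     :  * the hwq number recovered from the command's unique tag picks the
                     :  * virtqueue directly.
                     :  */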
     545           0 : static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
     546             :                                                   struct scsi_cmnd *sc)
     547             : {
     548           0 :         u32 tag = blk_mq_unique_tag(sc->request);
     549           0 :         u16 hwq = blk_mq_unique_tag_to_hwq(tag);
     550             : 
     551           0 :         return &vscsi->req_vqs[hwq];
     552             : }
     553             : 
     554           0 : static int virtscsi_queuecommand(struct Scsi_Host *shost,
     555             :                                  struct scsi_cmnd *sc)
     556             : {
     557           0 :         struct virtio_scsi *vscsi = shost_priv(shost);
     558           0 :         struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
     559           0 :         struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
     560           0 :         bool kick;
     561           0 :         unsigned long flags;
     562           0 :         int req_size;
     563           0 :         int ret;
     564             : 
     565           0 :         BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
     566             : 
     567             :         /* TODO: check feature bit and fail if unsupported?  */
     568           0 :         BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
     569             : 
     570           0 :         dev_dbg(&sc->device->sdev_gendev,
     571             :                 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
     572             : 
     573           0 :         cmd->sc = sc;
     574             : 
     575           0 :         BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
     576             : 
     577             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     578             :         if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
     579             :                 virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
     580             :                 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
     581             :                 req_size = sizeof(cmd->req.cmd_pi);
     582             :         } else
     583             : #endif
     584             :         {
     585           0 :                 virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
     586           0 :                 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
     587           0 :                 req_size = sizeof(cmd->req.cmd);
     588             :         }
     589             : 
     590           0 :         kick = (sc->flags & SCMD_LAST) != 0;
     591           0 :         ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
     592           0 :         if (ret == -EIO) {
     593           0 :                 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
     594           0 :                 spin_lock_irqsave(&req_vq->vq_lock, flags);
     595           0 :                 virtscsi_complete_cmd(vscsi, cmd);
     596           0 :                 spin_unlock_irqrestore(&req_vq->vq_lock, flags);
     597           0 :         } else if (ret != 0) {
     598           0 :                 return SCSI_MLQUEUE_HOST_BUSY;
     599             :         }
     600             :         return 0;
     601             : }
     602             : 
     603           0 : static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
     604             : {
     605           0 :         DECLARE_COMPLETION_ONSTACK(comp);
     606           0 :         int ret = FAILED;
     607             : 
     608           0 :         cmd->comp = &comp;
     609           0 :         if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
     610             :                               sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
     611           0 :                 goto out;
     612             : 
     613           0 :         wait_for_completion(&comp);
     614           0 :         if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
     615             :             cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
     616           0 :                 ret = SUCCESS;
     617             : 
     618             :         /*
     619             :          * The spec guarantees that all requests related to the TMF have
     620             :          * been completed, but the callback might not have run yet if
     621             :          * we're using independent interrupts (e.g. MSI).  Poll the
     622             :          * virtqueues once.
     623             :          *
     624             :          * In the abort case, sc->scsi_done will do nothing, because
     625             :          * the block layer must have detected a timeout and as a result
     626             :          * REQ_ATOM_COMPLETE has been set.
     627             :          */
     628           0 :         virtscsi_poll_requests(vscsi);
     629             : 
     630           0 : out:
     631           0 :         mempool_free(cmd, virtscsi_cmd_pool);
     632           0 :         return ret;
     633             : }
     634             : 
     635           0 : static int virtscsi_device_reset(struct scsi_cmnd *sc)
     636             : {
     637           0 :         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
     638           0 :         struct virtio_scsi_cmd *cmd;
     639             : 
     640           0 :         sdev_printk(KERN_INFO, sc->device, "device reset\n");
     641           0 :         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
     642           0 :         if (!cmd)
     643             :                 return FAILED;
     644             : 
     645           0 :         memset(cmd, 0, sizeof(*cmd));
     646           0 :         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
     647             :                 .type = VIRTIO_SCSI_T_TMF,
     648           0 :                 .subtype = cpu_to_virtio32(vscsi->vdev,
     649             :                                              VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
     650             :                 .lun[0] = 1,
     651           0 :                 .lun[1] = sc->device->id,
     652           0 :                 .lun[2] = (sc->device->lun >> 8) | 0x40,
     653             :                 .lun[3] = sc->device->lun & 0xff,
     654             :         };
     655           0 :         return virtscsi_tmf(vscsi, cmd);
     656             : }
     657             : 
     658           0 : static int virtscsi_device_alloc(struct scsi_device *sdevice)
     659             : {
     660             :         /*
     661             :          * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
     662             :          * may have transfer limits which come from the host SCSI
     663             :          * controller or something on the host side other than the
     664             :          * target itself.
     665             :          *
     666             :          * To make this work properly, the hypervisor can adjust the
     667             :          * target's VPD information to advertise these limits.  But
     668             :          * for that to work, the guest has to look at the VPD pages,
     669             :          * which we won't do by default if it is an SPC-2 device, even
     670             :          * if it does actually support it.
     671             :          *
     672             :          * So, set the blist to always try to read the VPD pages.
     673             :          */
     674           0 :         sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
     675             : 
     676           0 :         return 0;
     677             : }
     678             : 
     679             : 
     680             : /**
     681             :  * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
     682             :  * @sdev:       Virtscsi target whose queue depth to change
     683             :  * @qdepth:     New queue depth
     684             :  */
     685           0 : static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
     686             : {
     687           0 :         struct Scsi_Host *shost = sdev->host;
     688           0 :         int max_depth = shost->cmd_per_lun;
     689             : 
     690           0 :         return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
     691             : }
     692             : 
     693           0 : static int virtscsi_abort(struct scsi_cmnd *sc)
     694             : {
     695           0 :         struct virtio_scsi *vscsi = shost_priv(sc->device->host);
     696           0 :         struct virtio_scsi_cmd *cmd;
     697             : 
     698           0 :         scmd_printk(KERN_INFO, sc, "abort\n");
     699           0 :         cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
     700           0 :         if (!cmd)
     701             :                 return FAILED;
     702             : 
     703           0 :         memset(cmd, 0, sizeof(*cmd));
     704           0 :         cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
     705             :                 .type = VIRTIO_SCSI_T_TMF,
     706             :                 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
     707             :                 .lun[0] = 1,
     708           0 :                 .lun[1] = sc->device->id,
     709           0 :                 .lun[2] = (sc->device->lun >> 8) | 0x40,
     710             :                 .lun[3] = sc->device->lun & 0xff,
     711           0 :                 .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
     712             :         };
     713           0 :         return virtscsi_tmf(vscsi, cmd);
     714             : }
     715             : 
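                     : /*
                     :  * Let blk-mq derive its queue mapping from the virtqueue interrupt
                     :  * affinities; the offset of 2 skips the control and event queues, so
                     :  * hardware queue i corresponds to request virtqueue i + VIRTIO_SCSI_VQ_BASE.
                     :  */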
     716           0 : static int virtscsi_map_queues(struct Scsi_Host *shost)
     717             : {
     718           0 :         struct virtio_scsi *vscsi = shost_priv(shost);
     719           0 :         struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
     720             : 
     721           0 :         return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
     722             : }
     723             : 
     724           0 : static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
     725             : {
     726           0 :         struct virtio_scsi *vscsi = shost_priv(shost);
     727             : 
     728           0 :         virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
     729           0 : }
     730             : 
     731             : /*
     732             :  * The host guarantees to respond to each command, although I/O
     733             :  * latencies might be higher than on bare metal.  Reset the timer
     734             :  * unconditionally to give the host a chance to perform EH.
     735             :  */
     736           0 : static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
     737             : {
     738           0 :         return BLK_EH_RESET_TIMER;
     739             : }
     740             : 
     741             : static struct scsi_host_template virtscsi_host_template = {
     742             :         .module = THIS_MODULE,
     743             :         .name = "Virtio SCSI HBA",
     744             :         .proc_name = "virtio_scsi",
     745             :         .this_id = -1,
     746             :         .cmd_size = sizeof(struct virtio_scsi_cmd),
     747             :         .queuecommand = virtscsi_queuecommand,
     748             :         .commit_rqs = virtscsi_commit_rqs,
     749             :         .change_queue_depth = virtscsi_change_queue_depth,
     750             :         .eh_abort_handler = virtscsi_abort,
     751             :         .eh_device_reset_handler = virtscsi_device_reset,
     752             :         .eh_timed_out = virtscsi_eh_timed_out,
     753             :         .slave_alloc = virtscsi_device_alloc,
     754             : 
     755             :         .dma_boundary = UINT_MAX,
     756             :         .map_queues = virtscsi_map_queues,
     757             :         .track_queue_depth = 1,
     758             : };
     759             : 
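                     : /*
                     :  * Typed wrappers around virtio_cread()/virtio_cwrite() for fields of
                     :  * struct virtio_scsi_config in the device configuration space.
                     :  */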
     760             : #define virtscsi_config_get(vdev, fld) \
     761             :         ({ \
     762             :                 __virtio_native_type(struct virtio_scsi_config, fld) __val; \
     763             :                 virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
     764             :                 __val; \
     765             :         })
     766             : 
     767             : #define virtscsi_config_set(vdev, fld, val) \
     768             :         do { \
     769             :                 __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
     770             :                 virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
     771             :         } while(0)
     772             : 
     773           0 : static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
     774             :                              struct virtqueue *vq)
     775             : {
     776           0 :         spin_lock_init(&virtscsi_vq->vq_lock);
     777           0 :         virtscsi_vq->vq = vq;
     778             : }
     779             : 
     780           0 : static void virtscsi_remove_vqs(struct virtio_device *vdev)
     781             : {
     782             :         /* Stop all the virtqueues. */
     783           0 :         vdev->config->reset(vdev);
     784           0 :         vdev->config->del_vqs(vdev);
     785           0 : }
     786             : 
     787           0 : static int virtscsi_init(struct virtio_device *vdev,
     788             :                          struct virtio_scsi *vscsi)
     789             : {
     790           0 :         int err;
     791           0 :         u32 i;
     792           0 :         u32 num_vqs;
     793           0 :         vq_callback_t **callbacks;
     794           0 :         const char **names;
     795           0 :         struct virtqueue **vqs;
     796           0 :         struct irq_affinity desc = { .pre_vectors = 2 };
     797             : 
     798           0 :         num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
     799           0 :         vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
     800           0 :         callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
     801             :                                   GFP_KERNEL);
     802           0 :         names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);
     803             : 
     804           0 :         if (!callbacks || !vqs || !names) {
     805           0 :                 err = -ENOMEM;
     806           0 :                 goto out;
     807             :         }
     808             : 
     809           0 :         callbacks[0] = virtscsi_ctrl_done;
     810           0 :         callbacks[1] = virtscsi_event_done;
     811           0 :         names[0] = "control";
     812           0 :         names[1] = "event";
     813           0 :         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
     814           0 :                 callbacks[i] = virtscsi_req_done;
     815           0 :                 names[i] = "request";
     816             :         }
     817             : 
     818             :         /* Discover virtqueues and write information to configuration.  */
     819           0 :         err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
     820           0 :         if (err)
     821           0 :                 goto out;
     822             : 
     823           0 :         virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
     824           0 :         virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
     825           0 :         for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
     826           0 :                 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
     827           0 :                                  vqs[i]);
     828             : 
     829           0 :         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
     830           0 :         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
     831             : 
     832           0 :         err = 0;
     833             : 
     834           0 : out:
     835           0 :         kfree(names);
     836           0 :         kfree(callbacks);
     837           0 :         kfree(vqs);
     838           0 :         if (err)
     839           0 :                 virtscsi_remove_vqs(vdev);
     840           0 :         return err;
     841             : }
     842             : 
     843           0 : static int virtscsi_probe(struct virtio_device *vdev)
     844             : {
     845           0 :         struct Scsi_Host *shost;
     846           0 :         struct virtio_scsi *vscsi;
     847           0 :         int err;
     848           0 :         u32 sg_elems, num_targets;
     849           0 :         u32 cmd_per_lun;
     850           0 :         u32 num_queues;
     851             : 
     852           0 :         if (!vdev->config->get) {
     853           0 :                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
     854             :                         __func__);
     855           0 :                 return -EINVAL;
     856             :         }
     857             : 
     858             :         /* We need to know how many queues before we allocate. */
     859           0 :         num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
     860           0 :         num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
     861             : 
     862           0 :         num_targets = virtscsi_config_get(vdev, max_target) + 1;
     863             : 
     864           0 :         shost = scsi_host_alloc(&virtscsi_host_template,
     865           0 :                                 struct_size(vscsi, req_vqs, num_queues));
     866           0 :         if (!shost)
     867             :                 return -ENOMEM;
     868             : 
     869           0 :         sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
     870           0 :         shost->sg_tablesize = sg_elems;
     871           0 :         vscsi = shost_priv(shost);
     872           0 :         vscsi->vdev = vdev;
     873           0 :         vscsi->num_queues = num_queues;
     874           0 :         vdev->priv = shost;
     875             : 
     876           0 :         err = virtscsi_init(vdev, vscsi);
     877           0 :         if (err)
     878           0 :                 goto virtscsi_init_failed;
     879             : 
     880           0 :         shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
     881             : 
     882           0 :         cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
     883           0 :         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
     884           0 :         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
     885             : 
     886             :         /* LUNs > 256 are reported with format 1, so they go in the range
     887             :          * 16640-32767.
     888             :          */
     889           0 :         shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
     890           0 :         shost->max_id = num_targets;
     891           0 :         shost->max_channel = 0;
     892           0 :         shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
     893           0 :         shost->nr_hw_queues = num_queues;
     894             : 
     895             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     896             :         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
     897             :                 int host_prot;
     898             : 
     899             :                 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
     900             :                             SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
     901             :                             SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
     902             : 
     903             :                 scsi_host_set_prot(shost, host_prot);
     904             :                 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
     905             :         }
     906             : #endif
     907             : 
     908           0 :         err = scsi_add_host(shost, &vdev->dev);
     909           0 :         if (err)
     910           0 :                 goto scsi_add_host_failed;
     911             : 
     912           0 :         virtio_device_ready(vdev);
     913             : 
     914           0 :         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
     915           0 :                 virtscsi_kick_event_all(vscsi);
     916             : 
     917           0 :         scsi_scan_host(shost);
     918           0 :         return 0;
     919             : 
     920           0 : scsi_add_host_failed:
     921           0 :         vdev->config->del_vqs(vdev);
     922           0 : virtscsi_init_failed:
     923           0 :         scsi_host_put(shost);
     924           0 :         return err;
     925             : }
     926             : 
     927           0 : static void virtscsi_remove(struct virtio_device *vdev)
     928             : {
     929           0 :         struct Scsi_Host *shost = virtio_scsi_host(vdev);
     930           0 :         struct virtio_scsi *vscsi = shost_priv(shost);
     931             : 
     932           0 :         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
     933           0 :                 virtscsi_cancel_event_work(vscsi);
     934             : 
     935           0 :         scsi_remove_host(shost);
     936           0 :         virtscsi_remove_vqs(vdev);
     937           0 :         scsi_host_put(shost);
     938           0 : }
     939             : 
     940             : #ifdef CONFIG_PM_SLEEP
     941             : static int virtscsi_freeze(struct virtio_device *vdev)
     942             : {
     943             :         virtscsi_remove_vqs(vdev);
     944             :         return 0;
     945             : }
     946             : 
     947             : static int virtscsi_restore(struct virtio_device *vdev)
     948             : {
     949             :         struct Scsi_Host *sh = virtio_scsi_host(vdev);
     950             :         struct virtio_scsi *vscsi = shost_priv(sh);
     951             :         int err;
     952             : 
     953             :         err = virtscsi_init(vdev, vscsi);
     954             :         if (err)
     955             :                 return err;
     956             : 
     957             :         virtio_device_ready(vdev);
     958             : 
     959             :         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
     960             :                 virtscsi_kick_event_all(vscsi);
     961             : 
     962             :         return err;
     963             : }
     964             : #endif
     965             : 
     966             : static struct virtio_device_id id_table[] = {
     967             :         { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
     968             :         { 0 },
     969             : };
     970             : 
     971             : static unsigned int features[] = {
     972             :         VIRTIO_SCSI_F_HOTPLUG,
     973             :         VIRTIO_SCSI_F_CHANGE,
     974             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     975             :         VIRTIO_SCSI_F_T10_PI,
     976             : #endif
     977             : };
     978             : 
     979             : static struct virtio_driver virtio_scsi_driver = {
     980             :         .feature_table = features,
     981             :         .feature_table_size = ARRAY_SIZE(features),
     982             :         .driver.name = KBUILD_MODNAME,
     983             :         .driver.owner = THIS_MODULE,
     984             :         .id_table = id_table,
     985             :         .probe = virtscsi_probe,
     986             : #ifdef CONFIG_PM_SLEEP
     987             :         .freeze = virtscsi_freeze,
     988             :         .restore = virtscsi_restore,
     989             : #endif
     990             :         .remove = virtscsi_remove,
     991             : };
     992             : 
     993           1 : static int __init init(void)
     994             : {
     995           1 :         int ret = -ENOMEM;
     996             : 
     997           1 :         virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
     998           1 :         if (!virtscsi_cmd_cache) {
     999           0 :                 pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
    1000           0 :                 goto error;
    1001             :         }
    1002             : 
    1003             : 
    1004           2 :         virtscsi_cmd_pool =
    1005           1 :                 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
    1006             :                                          virtscsi_cmd_cache);
    1007           1 :         if (!virtscsi_cmd_pool) {
    1008           0 :                 pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
    1009           0 :                 goto error;
    1010             :         }
    1011           1 :         ret = register_virtio_driver(&virtio_scsi_driver);
    1012           1 :         if (ret < 0)
    1013           0 :                 goto error;
    1014             : 
    1015             :         return 0;
    1016             : 
    1017           0 : error:
    1018           0 :         mempool_destroy(virtscsi_cmd_pool);
    1019           0 :         virtscsi_cmd_pool = NULL;
    1020           0 :         kmem_cache_destroy(virtscsi_cmd_cache);
    1021           0 :         virtscsi_cmd_cache = NULL;
    1022           0 :         return ret;
    1023             : }
    1024             : 
    1025           0 : static void __exit fini(void)
    1026             : {
    1027           0 :         unregister_virtio_driver(&virtio_scsi_driver);
    1028           0 :         mempool_destroy(virtscsi_cmd_pool);
    1029           0 :         kmem_cache_destroy(virtscsi_cmd_cache);
    1030           0 : }
    1031             : module_init(init);
    1032             : module_exit(fini);
    1033             : 
    1034             : MODULE_DEVICE_TABLE(virtio, id_table);
    1035             : MODULE_DESCRIPTION("Virtio SCSI HBA driver");
    1036             : MODULE_LICENSE("GPL");

Generated by: LCOV version 1.14