LCOV code coverage report: block/blk-ioc.c
Test: landlock.info, 2021-04-22 12:43:58
Coverage: lines 65/178 (36.5 %), functions 7/15 (46.7 %)

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}
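
/*
 * Usage sketch (hypothetical caller, assuming the passer already owns a
 * reference): a second owner must take its own reference before keeping
 * a copy of the pointer, and later drop it with put_io_context().
 *
 *      struct foo { struct io_context *ioc; };         // hypothetical
 *
 *      static void foo_adopt_ioc(struct foo *f, struct io_context *ioc)
 *      {
 *              get_io_context(ioc);    // refcount must already be > 0
 *              f->ioc = ioc;
 *      }
 */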

static void icq_free_icq_rcu(struct rcu_head *head)
{
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

        kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
        struct elevator_type *et = icq->q->elevator->type;

        if (icq->flags & ICQ_EXITED)
                return;

        if (et->ops.exit_icq)
                et->ops.exit_icq(icq);

        icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);

        /*
         * Both setting the lookup hint to @icq and clearing it from the
         * hint are done under queue_lock.  If the hint isn't pointing to
         * @icq now, it never will.  Hint assignment itself can race
         * safely.
         */
        if (rcu_access_pointer(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        ioc_exit_icq(icq);

        /*
         * @icq->q might have gone away by the time the RCU callback
         * runs, making it impossible to determine icq_cache.  Record it
         * in @icq.
         */
        icq->__rcu_icq_cache = et->icq_cache;
        icq->flags |= ICQ_DESTROYED;
        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
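
/*
 * The call_rcu() use above illustrates a general rule: anything the RCU
 * callback will need after the surrounding objects may be gone must be
 * stashed in the object itself.  A minimal sketch with hypothetical
 * names:
 *
 *      struct item {
 *              struct kmem_cache *cache;       // recorded for the callback
 *              struct rcu_head rcu;
 *      };
 *
 *      static void item_free_rcu(struct rcu_head *head)
 *      {
 *              struct item *it = container_of(head, struct item, rcu);
 *
 *              kmem_cache_free(it->cache, it); // only @it is still valid
 *      }
 *
 *      // teardown: it->cache = owning_cache;
 *      //           call_rcu(&it->rcu, item_free_rcu);
 */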

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
        spin_lock_irq(&ioc->lock);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(&q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                } else {
                        /* Make sure q and icq cannot be freed. */
                        rcu_read_lock();

                        /* Re-acquire the locks in the correct order. */
                        spin_unlock(&ioc->lock);
                        spin_lock(&q->queue_lock);
                        spin_lock(&ioc->lock);

                        /*
                         * The icq may have been destroyed when the ioc lock
                         * was released.
                         */
                        if (!(icq->flags & ICQ_DESTROYED))
                                ioc_destroy_icq(icq);

                        spin_unlock(&q->queue_lock);
                        rcu_read_unlock();
                }
        }

        spin_unlock_irq(&ioc->lock);

        kmem_cache_free(iocontext_cachep, ioc);
}
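
/*
 * The trylock dance above is the stock idiom for taking a second spinlock
 * whose ordering rule is the reverse of what this path holds: try it
 * opportunistically, and on failure drop the held lock, re-acquire both
 * in the documented order, and revalidate.  Generic sketch with
 * hypothetical locks a (held) and b (wanted, ordered before a):
 *
 *      if (spin_trylock(&b)) {
 *              do_work();                      // both locks held
 *              spin_unlock(&b);
 *      } else {
 *              spin_unlock(&a);
 *              spin_lock(&b);
 *              spin_lock(&a);
 *              if (still_valid())              // state may have changed
 *                      do_work();
 *              spin_unlock(&b);
 *      }
 */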

/**
 * put_io_context - put a reference to io_context
 * @ioc: io_context to put
 *
 * Decrement the reference count of @ioc and release it if the count
 * reaches zero.
 */
void put_io_context(struct io_context *ioc)
{
        unsigned long flags;
        bool free_ioc = false;

        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

        /*
         * Releasing ioc requires reverse order double locking and we may
         * already be holding a queue_lock.  Do it asynchronously from wq.
         */
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
                        queue_work(system_power_efficient_wq,
                                        &ioc->release_work);
                else
                        free_ioc = true;
                spin_unlock_irqrestore(&ioc->lock, flags);
        }

        if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
}
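
/*
 * Pairing sketch (hypothetical caller): the final put either frees the
 * ioc directly or, if icqs are still linked, defers the reverse-order
 * unlinking to release_work above.
 *
 *      struct io_context *ioc;
 *
 *      ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
 *      if (ioc) {
 *              // ... use ioc ...
 *              put_io_context(ioc);    // may queue release_work
 *      }
 */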

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If the active reference reaches zero
 * after the put, @ioc can never issue further IOs and ioscheds are
 * notified.
 */
void put_io_context_active(struct io_context *ioc)
{
        struct io_cq *icq;

        if (!atomic_dec_and_test(&ioc->active_ref)) {
                put_io_context(ioc);
                return;
        }

        spin_lock_irq(&ioc->lock);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;

                ioc_exit_icq(icq);
        }
        spin_unlock_irq(&ioc->lock);

        put_io_context(ioc);
}
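
/*
 * An active reference is a regular reference plus a claim that the
 * holder may still issue IO.  The acquire side, get_io_context_active()
 * in include/linux/iocontext.h, bumps both counters, so usage pairs up
 * roughly as:
 *
 *      get_io_context_active(ioc);     // refcount++, active_ref++
 *      // ... submit IO on behalf of ioc ...
 *      put_io_context_active(ioc);     // may exit icqs, then drops ref
 */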

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        atomic_dec(&ioc->nr_tasks);
        put_io_context_active(ioc);
}
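
/*
 * Exit-path wiring (sketch): the task-exit code invokes this once per
 * exiting task that owns an ioc, roughly as in kernel/exit.c:
 *
 *      if (tsk->io_context)
 *              exit_io_context(tsk);
 *
 * Afterwards the task must not issue IO; create_task_io_context() below
 * still tolerates an exiting %current for late exit-path IO.
 */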

static void __ioc_clear_queue(struct list_head *icq_list)
{
        unsigned long flags;

        rcu_read_lock();
        while (!list_empty(icq_list)) {
                struct io_cq *icq = list_entry(icq_list->next,
                                                struct io_cq, q_node);
                struct io_context *ioc = icq->ioc;

                spin_lock_irqsave(&ioc->lock, flags);
                if (icq->flags & ICQ_DESTROYED) {
                        spin_unlock_irqrestore(&ioc->lock, flags);
                        continue;
                }
                ioc_destroy_icq(icq);
                spin_unlock_irqrestore(&ioc->lock, flags);
        }
        rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
        LIST_HEAD(icq_list);

        spin_lock_irq(&q->queue_lock);
        list_splice_init(&q->icq_list, &icq_list);
        spin_unlock_irq(&q->queue_lock);

        __ioc_clear_queue(&icq_list);
}
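
/*
 * The splice is the usual trick for draining a lock-protected list
 * without holding the lock across the drain: steal every entry onto a
 * private head under the lock, then walk the private head at leisure.
 * Generic sketch with a hypothetical lock/list pair:
 *
 *      LIST_HEAD(tmp);
 *
 *      spin_lock_irq(&mylock);                 // guards mylist
 *      list_splice_init(&mylist, &tmp);        // mylist is now empty
 *      spin_unlock_irq(&mylock);
 *
 *      // process entries on tmp without holding mylock
 */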

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
        struct io_context *ioc;
        int ret;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return -ENOMEM;

        /* initialize */
        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->nr_tasks, 1);
        atomic_set(&ioc->active_ref, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);

        /*
         * Try to install.  ioc shouldn't be installed if someone else
         * already did, or if @task, which isn't %current, is exiting.
         * Note that we need to allow ioc creation on an exiting %current
         * as the exit path may issue IOs from e.g. exit_files().  The
         * exit path is responsible for not issuing IO after
         * exit_io_context().
         */
        task_lock(task);
        if (!task->io_context &&
            (task == current || !(task->flags & PF_EXITING)))
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);

        ret = task->io_context ? 0 : -EBUSY;

        task_unlock(task);

        return ret;
}
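
/*
 * Typical call pattern (sketch): callers that only need %current to own
 * an ioc can ignore a racing creator, since the race leaves
 * task->io_context set either way.
 *
 *      if (unlikely(!current->io_context))
 *              create_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *      if (!current->io_context)
 *              return -ENOMEM;         // allocation failed
 */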

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        do {
                task_lock(task);
                ioc = task->io_context;
                if (likely(ioc)) {
                        get_io_context(ioc);
                        task_unlock(task);
                        return ioc;
                }
                task_unlock(task);
        } while (!create_task_io_context(task, gfp_flags, node));

        return NULL;
}
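
/*
 * Usage sketch (modelled loosely on the ioprio code): grab a task's ioc,
 * creating it if necessary, update it, and drop the reference.
 *
 *      struct io_context *ioc;
 *
 *      ioc = get_task_io_context(p, GFP_ATOMIC, NUMA_NO_NODE);
 *      if (!ioc)
 *              return -ENOMEM;
 *      ioc->ioprio = ioprio;           // struct io_context field
 *      put_io_context(ioc);
 */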

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
        struct io_cq *icq;

        lockdep_assert_held(&q->queue_lock);

        /*
         * icq's are indexed from @ioc using a radix tree and a hint
         * pointer, both of which are protected with RCU.  All removals
         * are done holding both the q and ioc locks, and we're holding
         * the q lock - if we find an icq which points to us, it's
         * guaranteed to be valid.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq);      /* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
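
/*
 * Caller sketch (roughly what the elevator glue does when setting up a
 * request): look the icq up under queue_lock, falling back to creation.
 *
 *      struct io_cq *icq;
 *
 *      spin_lock_irq(&q->queue_lock);
 *      icq = ioc_lookup_icq(ioc, q);
 *      spin_unlock_irq(&q->queue_lock);
 *
 *      if (!icq)
 *              icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */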

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, one will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask)
{
        struct elevator_type *et = q->elevator->type;
        struct io_cq *icq;

        /* allocate stuff */
        icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_maybe_preload(gfp_mask) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(&q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (et->ops.init_icq)
                        et->ops.init_icq(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(ioc, q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(&q->queue_lock);
        radix_tree_preload_end();
        return icq;
}
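
/*
 * The preload calls above are the stock pattern for inserting into a
 * radix tree under a spinlock: preallocate nodes while sleeping is still
 * allowed, insert atomically, then end the preload section.  Generic
 * sketch:
 *
 *      if (radix_tree_maybe_preload(gfp_mask) < 0)
 *              return -ENOMEM;         // could not preallocate
 *
 *      spin_lock(&lock);               // hypothetical tree lock
 *      ret = radix_tree_insert(&tree, index, item);
 *      spin_unlock(&lock);
 *
 *      radix_tree_preload_end();       // re-enables preemption
 */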

static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);
