LCOV - code coverage report
Current view: top level - kernel/sched - completion.c (source / functions)
                                            Hit    Total    Coverage
Test: landlock.info          Lines:          53       83      63.9 %
Date: 2021-04-22 12:43:58    Functions:       7       14      50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Generic wait-for-completion handler;
       4             :  *
       5             :  * It differs from semaphores in that the default case is the opposite:
       6             :  * wait_for_completion() blocks by default, whereas a semaphore does not. The
       7             :  * interface also makes it easy to 'complete' multiple waiting threads,
       8             :  * something which isn't entirely natural for semaphores.
       9             :  *
      10             :  * But more importantly, the primitive documents the usage. Semaphores would
      11             :  * typically be used for exclusion which gives rise to priority inversion.
      12             :  * Waiting for completion is typically a sync point, but not an exclusion point.
      13             :  */
      14             : #include "sched.h"
      15             : 
      16             : /**
      17             :  * complete: - signals a single thread waiting on this completion
      18             :  * @x:  holds the state of this particular completion
      19             :  *
      20             :  * This will wake up a single thread waiting on this completion. Threads will be
      21             :  * awakened in the same order in which they were queued.
      22             :  *
      23             :  * See also complete_all(), wait_for_completion() and related routines.
      24             :  *
      25             :  * If this function wakes up a task, it executes a full memory barrier before
      26             :  * accessing the task state.
      27             :  */
      28         417 : void complete(struct completion *x)
      29             : {
      30         417 :         unsigned long flags;
      31             : 
      32         417 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
      33             : 
      34         417 :         if (x->done != UINT_MAX)
      35         417 :                 x->done++;
      36         417 :         swake_up_locked(&x->wait);
      37         417 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
      38         417 : }
      39             : EXPORT_SYMBOL(complete);
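
A minimal usage sketch (not part of the measured source; the worker and function names are assumptions): one thread blocks on an on-stack completion while a hypothetical worker thread signals it with complete().

    #include <linux/completion.h>
    #include <linux/kthread.h>

    /* Hypothetical worker: signals the completion handed to it as its argument. */
    static int worker_fn(void *data)
    {
            struct completion *done = data;

            /* ... perform the actual work here ... */
            complete(done);                 /* wakes exactly one waiter */
            return 0;
    }

    static void wait_for_worker(void)
    {
            DECLARE_COMPLETION_ONSTACK(done);

            /* Error handling for kthread_run() omitted in this sketch. */
            kthread_run(worker_fn, &done, "worker");
            wait_for_completion(&done);     /* blocks until complete(&done) runs */
    }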
      40             : 
      41             : /**
      42             :  * complete_all: - signals all threads waiting on this completion
      43             :  * @x:  holds the state of this particular completion
      44             :  *
      45             :  * This will wake up all threads waiting on this particular completion event.
      46             :  *
      47             :  * If this function wakes up a task, it executes a full memory barrier before
      48             :  * accessing the task state.
      49             :  *
      50             :  * Since complete_all() sets the completion of @x permanently to done
      51             :  * to allow multiple waiters to finish, a call to reinit_completion()
      52             :  * must be used on @x if @x is to be used again. The code must make
      53             :  * sure that all waiters have woken and finished before reinitializing
      54             :  * @x. Also note that the function completion_done() cannot be used
      55             :  * to know if there are still waiters after complete_all() has been called.
      56             :  */
      57          26 : void complete_all(struct completion *x)
      58             : {
      59          26 :         unsigned long flags;
      60             : 
      61          26 :         lockdep_assert_RT_in_threaded_ctx();
      62             : 
      63          26 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
      64          26 :         x->done = UINT_MAX;
      65          26 :         swake_up_all_locked(&x->wait);
      66          26 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
      67          26 : }
      68             : EXPORT_SYMBOL(complete_all);
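
A sketch (with assumed names) of the complete_all()/reinit_completion() pairing described above: every current waiter is released, and the completion must be reinitialized before it can be reused.

    #include <linux/completion.h>

    static DECLARE_COMPLETION(setup_done);          /* assumed shared completion */

    static void release_all_waiters(void)
    {
            /* Every thread blocked in wait_for_completion(&setup_done) returns. */
            complete_all(&setup_done);
    }

    static void recycle_completion(void)
    {
            /*
             * Only once all waiters are known to have finished; note that
             * completion_done() cannot be used for that check after
             * complete_all().
             */
            reinit_completion(&setup_done);
    }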
      69             : 
      70             : static inline long __sched
      71         384 : do_wait_for_common(struct completion *x,
      72             :                    long (*action)(long), long timeout, int state)
      73             : {
      74         384 :         if (!x->done) {
      75         334 :                 DECLARE_SWAITQUEUE(wait);
      76             : 
      77         334 :                 do {
      78         334 :                         if (signal_pending_state(state, current)) {
      79             :                                 timeout = -ERESTARTSYS;
      80             :                                 break;
      81             :                         }
      82         334 :                         __prepare_to_swait(&x->wait, &wait);
      83         334 :                         __set_current_state(state);
      84         334 :                         raw_spin_unlock_irq(&x->wait.lock);
      85         334 :                         timeout = action(timeout);
      86         334 :                         raw_spin_lock_irq(&x->wait.lock);
      87         334 :                 } while (!x->done && timeout);
      88         334 :                 __finish_swait(&x->wait, &wait);
      89         334 :                 if (!x->done)
      90           0 :                         return timeout;
      91             :         }
      92         384 :         if (x->done != UINT_MAX)
      93         368 :                 x->done--;
      94         384 :         return timeout ?: 1;
      95             : }
      96             : 
      97             : static inline long __sched
      98         384 : __wait_for_common(struct completion *x,
      99             :                   long (*action)(long), long timeout, int state)
     100             : {
     101         384 :         might_sleep();
     102             : 
     103         384 :         complete_acquire(x);
     104             : 
     105         384 :         raw_spin_lock_irq(&x->wait.lock);
     106         384 :         timeout = do_wait_for_common(x, action, timeout, state);
     107         384 :         raw_spin_unlock_irq(&x->wait.lock);
     108             : 
     109         384 :         complete_release(x);
     110             : 
     111         384 :         return timeout;
     112             : }
     113             : 
     114             : static long __sched
     115         310 : wait_for_common(struct completion *x, long timeout, int state)
     116             : {
     117         310 :         return __wait_for_common(x, schedule_timeout, timeout, state);
     118             : }
     119             : 
     120             : static long __sched
     121          74 : wait_for_common_io(struct completion *x, long timeout, int state)
     122             : {
     123          74 :         return __wait_for_common(x, io_schedule_timeout, timeout, state);
     124             : }
     125             : 
     126             : /**
     127             :  * wait_for_completion: - waits for completion of a task
     128             :  * @x:  holds the state of this particular completion
     129             :  *
     130             :  * This waits to be signaled for completion of a specific task. It is NOT
     131             :  * interruptible and there is no timeout.
     132             :  *
     133             :  * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
     134             :  * and interrupt capability. Also see complete().
     135             :  */
     136         252 : void __sched wait_for_completion(struct completion *x)
     137             : {
     138         252 :         wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
     139         252 : }
     140             : EXPORT_SYMBOL(wait_for_completion);
     141             : 
     142             : /**
     143             :  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
     144             :  * @x:  holds the state of this particular completion
     145             :  * @timeout:  timeout value in jiffies
     146             :  *
     147             :  * This waits for either a completion of a specific task to be signaled or for a
     148             :  * specified timeout to expire. The timeout is in jiffies. It is not
     149             :  * interruptible.
     150             :  *
     151             :  * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
     152             :  * till timeout) if completed.
     153             :  */
     154             : unsigned long __sched
     155           0 : wait_for_completion_timeout(struct completion *x, unsigned long timeout)
     156             : {
     157           0 :         return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
     158             : }
     159             : EXPORT_SYMBOL(wait_for_completion_timeout);
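
A sketch of acting on the return value documented above; the function and parameter names are assumptions.

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_for_reply(struct completion *reply_done)
    {
            unsigned long left;

            left = wait_for_completion_timeout(reply_done, msecs_to_jiffies(500));
            if (!left)
                    return -ETIMEDOUT;      /* 0: the timeout expired first */

            /* Positive: completed with 'left' jiffies (at least 1) remaining. */
            return 0;
    }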
     160             : 
     161             : /**
     162             :  * wait_for_completion_io: - waits for completion of a task
     163             :  * @x:  holds the state of this particular completion
     164             :  *
     165             :  * This waits to be signaled for completion of a specific task. It is NOT
     166             :  * interruptible and there is no timeout. The caller is accounted as waiting
     167             :  * for IO (which traditionally means blkio only).
     168             :  */
     169          74 : void __sched wait_for_completion_io(struct completion *x)
     170             : {
     171          74 :         wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
     172          74 : }
     173             : EXPORT_SYMBOL(wait_for_completion_io);
     174             : 
     175             : /**
     176             :  * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
     177             :  * @x:  holds the state of this particular completion
     178             :  * @timeout:  timeout value in jiffies
     179             :  *
     180             :  * This waits for either a completion of a specific task to be signaled or for a
     181             :  * specified timeout to expire. The timeout is in jiffies. It is not
     182             :  * interruptible. The caller is accounted as waiting for IO (which traditionally
     183             :  * means blkio only).
     184             :  *
     185             :  * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
     186             :  * till timeout) if completed.
     187             :  */
     188             : unsigned long __sched
     189           0 : wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
     190             : {
     191           0 :         return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
     192             : }
     193             : EXPORT_SYMBOL(wait_for_completion_io_timeout);
     194             : 
     195             : /**
     196             :  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
     197             :  * @x:  holds the state of this particular completion
     198             :  *
     199             :  * This waits for completion of a specific task to be signaled. It is
     200             :  * interruptible.
     201             :  *
     202             :  * Return: -ERESTARTSYS if interrupted, 0 if completed.
     203             :  */
     204           0 : int __sched wait_for_completion_interruptible(struct completion *x)
     205             : {
     206           0 :         long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
     207           0 :         if (t == -ERESTARTSYS)
     208           0 :                 return t;
     209             :         return 0;
     210             : }
     211             : EXPORT_SYMBOL(wait_for_completion_interruptible);
     212             : 
     213             : /**
     214             :  * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
     215             :  * @x:  holds the state of this particular completion
     216             :  * @timeout:  timeout value in jiffies
     217             :  *
     218             :  * This waits for either a completion of a specific task to be signaled or for a
     219             :  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
     220             :  *
     221             :  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
     222             :  * or number of jiffies left till timeout) if completed.
     223             :  */
     224             : long __sched
     225           0 : wait_for_completion_interruptible_timeout(struct completion *x,
     226             :                                           unsigned long timeout)
     227             : {
     228           0 :         return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
     229             : }
     230             : EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
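
A sketch of the three-way return check for this interruptible, timed variant; the names are assumptions.

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_for_event(struct completion *event)
    {
            long t;

            t = wait_for_completion_interruptible_timeout(event, HZ);
            if (t == -ERESTARTSYS)
                    return -ERESTARTSYS;    /* interrupted by a signal */
            if (!t)
                    return -ETIMEDOUT;      /* the full timeout elapsed */
            return 0;                       /* completed, t jiffies were left */
    }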
     231             : 
     232             : /**
     233             :  * wait_for_completion_killable: - waits for completion of a task (killable)
     234             :  * @x:  holds the state of this particular completion
     235             :  *
     236             :  * This waits to be signaled for completion of a specific task. It can be
     237             :  * interrupted by a kill signal.
     238             :  *
     239             :  * Return: -ERESTARTSYS if interrupted, 0 if completed.
     240             :  */
     241          58 : int __sched wait_for_completion_killable(struct completion *x)
     242             : {
     243          58 :         long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
     244          58 :         if (t == -ERESTARTSYS)
     245           0 :                 return t;
     246             :         return 0;
     247             : }
     248             : EXPORT_SYMBOL(wait_for_completion_killable);
     249             : 
     250             : /**
     251             :  * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
     252             :  * @x:  holds the state of this particular completion
     253             :  * @timeout:  timeout value in jiffies
     254             :  *
     255             :  * This waits for either a completion of a specific task to be
     256             :  * signaled or for a specified timeout to expire. It can be
     257             :  * interrupted by a kill signal. The timeout is in jiffies.
     258             :  *
     259             :  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
     260             :  * or number of jiffies left till timeout) if completed.
     261             :  */
     262             : long __sched
     263           0 : wait_for_completion_killable_timeout(struct completion *x,
     264             :                                      unsigned long timeout)
     265             : {
     266           0 :         return wait_for_common(x, timeout, TASK_KILLABLE);
     267             : }
     268             : EXPORT_SYMBOL(wait_for_completion_killable_timeout);
     269             : 
     270             : /**
     271             :  *      try_wait_for_completion - try to decrement a completion without blocking
     272             :  *      @x:     completion structure
     273             :  *
     274             :  *      Return: 0 if a decrement cannot be done without blocking
     275             :  *               1 if a decrement succeeded.
     276             :  *
     277             :  *      If a completion is being used as a counting completion,
     278             :  *      attempt to decrement the counter without blocking. This
     279             :  *      enables us to avoid waiting if the resource the completion
     280             :  *      is protecting is not available.
     281             :  */
     282           0 : bool try_wait_for_completion(struct completion *x)
     283             : {
     284           0 :         unsigned long flags;
     285           0 :         bool ret = true;
     286             : 
     287             :         /*
     288             :          * Since x->done will need to be locked only
     289             :          * in the non-blocking case, we check x->done
     290             :          * first without taking the lock so we can
     291             :          * return early in the blocking case.
     292             :          */
     293           0 :         if (!READ_ONCE(x->done))
     294             :                 return false;
     295             : 
     296           0 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
     297           0 :         if (!x->done)
     298             :                 ret = false;
     299           0 :         else if (x->done != UINT_MAX)
     300           0 :                 x->done--;
     301           0 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     302           0 :         return ret;
     303             : }
     304             : EXPORT_SYMBOL(try_wait_for_completion);
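
A sketch of the non-blocking decrement described above, using a completion as a counter of available slots; the names are assumptions.

    #include <linux/completion.h>

    static DECLARE_COMPLETION(slot_available);      /* assumed counting completion */

    static bool claim_slot_or_back_off(void)
    {
            /* Take a slot only if one is already available; never block. */
            if (try_wait_for_completion(&slot_available))
                    return true;

            /* Nothing available; the caller can retry later or do other work. */
            return false;
    }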
     305             : 
     306             : /**
     307             :  *      completion_done - Test to see if a completion has any waiters
     308             :  *      @x:     completion structure
     309             :  *
     310             :  *      Return: 0 if there are waiters (wait_for_completion() in progress)
     311             :  *               1 if there are no waiters.
     312             :  *
      313             :  *      Note, this will always return true if complete_all() was called on @x.
     314             :  */
     315           0 : bool completion_done(struct completion *x)
     316             : {
     317           0 :         unsigned long flags;
     318             : 
     319           0 :         if (!READ_ONCE(x->done))
     320             :                 return false;
     321             : 
     322             :         /*
     323             :          * If ->done, we need to wait for complete() to release ->wait.lock
     324             :          * otherwise we can end up freeing the completion before complete()
     325             :          * is done referencing it.
     326             :          */
     327           0 :         raw_spin_lock_irqsave(&x->wait.lock, flags);
     328           0 :         raw_spin_unlock_irqrestore(&x->wait.lock, flags);
     329           0 :         return true;
     330             : }
     331             : EXPORT_SYMBOL(completion_done);
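
A sketch of a common pattern built on completion_done(): signal only when no signal is currently pending; per the note above, this check is not meaningful after complete_all(). The function name is an assumption.

    #include <linux/completion.h>

    static void signal_once(struct completion *x)
    {
            /*
             * completion_done() is false while x->done is zero: either @x has
             * not been signalled yet, or every signal has already been
             * consumed by a waiter.
             */
            if (!completion_done(x))
                    complete(x);
    }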

Generated by: LCOV version 1.14