LCOV - code coverage report
Current view: top level - kernel/locking - rtmutex.c (source / functions)
Test: landlock.info          Lines:     9 / 461   (2.0 %)
Date: 2021-04-22 12:43:58    Functions: 1 / 39    (2.6 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
       4             :  *
       5             :  * started by Ingo Molnar and Thomas Gleixner.
       6             :  *
       7             :  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
       8             :  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
       9             :  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
      10             :  *  Copyright (C) 2006 Esben Nielsen
      11             :  *
      12             :  *  See Documentation/locking/rt-mutex-design.rst for details.
      13             :  */
      14             : #include <linux/spinlock.h>
      15             : #include <linux/export.h>
      16             : #include <linux/sched/signal.h>
      17             : #include <linux/sched/rt.h>
      18             : #include <linux/sched/deadline.h>
      19             : #include <linux/sched/wake_q.h>
      20             : #include <linux/sched/debug.h>
      21             : #include <linux/timer.h>
      22             : 
      23             : #include "rtmutex_common.h"
      24             : 
      25             : /*
      26             :  * lock->owner state tracking:
      27             :  *
      28             :  * lock->owner holds the task_struct pointer of the owner. Bit 0
      29             :  * is used to keep track of the "lock has waiters" state.
      30             :  *
      31             :  * owner        bit0
      32             :  * NULL         0       lock is free (fast acquire possible)
      33             :  * NULL         1       lock is free and has waiters and the top waiter
      34             :  *                              is going to take the lock*
      35             :  * taskpointer  0       lock is held (fast release possible)
      36             :  * taskpointer  1       lock is held and has waiters**
      37             :  *
      38             :  * The fast atomic compare exchange based acquire and release is only
      39             :  * possible when bit 0 of lock->owner is 0.
      40             :  *
       41             :  * (*) It can also be a transitional state when grabbing the lock
       42             :  * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
       43             :  * we need to set bit 0 before looking at the lock, and the owner may be
       44             :  * NULL during this small window, hence this can be a transitional state.
      45             :  *
       46             :  * (**) There is a small window when bit 0 is set but there are no
      47             :  * waiters. This can happen when grabbing the lock in the slow path.
      48             :  * To prevent a cmpxchg of the owner releasing the lock, we need to
      49             :  * set this bit before looking at the lock.
      50             :  */
      51             : 
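
The table above is easiest to read next to a sketch of how the encoded owner
word is pulled apart. This is an illustration only, with hypothetical helper
names; the real accessors used below (rt_mutex_owner(), rt_mutex_has_waiters())
come from rtmutex_common.h, included above, and may differ in detail:

        /* Illustration only: decode lock->owner per the table above. */
        static inline struct task_struct *owner_word_to_task(struct rt_mutex *lock)
        {
                unsigned long val = (unsigned long)READ_ONCE(lock->owner);

                /* Clear bit 0 ("lock has waiters") to recover the task pointer. */
                return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
        }

        static inline bool owner_word_waiters_bit(struct rt_mutex *lock)
        {
                /* Bit 0 set: waiters are queued, or one of the transitional states. */
                return (unsigned long)READ_ONCE(lock->owner) & RT_MUTEX_HAS_WAITERS;
        }
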
      52             : static void
      53           0 : rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
      54             : {
      55           0 :         unsigned long val = (unsigned long)owner;
      56             : 
      57           0 :         if (rt_mutex_has_waiters(lock))
      58           0 :                 val |= RT_MUTEX_HAS_WAITERS;
      59             : 
      60           0 :         WRITE_ONCE(lock->owner, (struct task_struct *)val);
      61             : }
      62             : 
      63             : static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
      64             : {
      65             :         lock->owner = (struct task_struct *)
      66             :                         ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
      67             : }
      68             : 
      69           0 : static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
      70             : {
      71           0 :         unsigned long owner, *p = (unsigned long *) &lock->owner;
      72             : 
      73           0 :         if (rt_mutex_has_waiters(lock))
      74             :                 return;
      75             : 
      76             :         /*
       77             :          * The rbtree has no waiters enqueued; now make sure that the
       78             :          * lock->owner still has the waiters bit set; otherwise the
      79             :          * following can happen:
      80             :          *
      81             :          * CPU 0        CPU 1           CPU2
      82             :          * l->owner=T1
      83             :          *              rt_mutex_lock(l)
      84             :          *              lock(l->lock)
      85             :          *              l->owner = T1 | HAS_WAITERS;
      86             :          *              enqueue(T2)
      87             :          *              boost()
      88             :          *                unlock(l->lock)
      89             :          *              block()
      90             :          *
      91             :          *                              rt_mutex_lock(l)
      92             :          *                              lock(l->lock)
      93             :          *                              l->owner = T1 | HAS_WAITERS;
      94             :          *                              enqueue(T3)
      95             :          *                              boost()
      96             :          *                                unlock(l->lock)
      97             :          *                              block()
      98             :          *              signal(->T2) signal(->T3)
      99             :          *              lock(l->lock)
     100             :          *              dequeue(T2)
     101             :          *              deboost()
     102             :          *                unlock(l->lock)
     103             :          *                              lock(l->lock)
     104             :          *                              dequeue(T3)
     105             :          *                               ==> wait list is empty
     106             :          *                              deboost()
     107             :          *                               unlock(l->lock)
     108             :          *              lock(l->lock)
     109             :          *              fixup_rt_mutex_waiters()
      110             :          *                if (wait_list_empty(l)) {
     111             :          *                  l->owner = owner
     112             :          *                  owner = l->owner & ~HAS_WAITERS;
     113             :          *                    ==> l->owner = T1
     114             :          *                }
     115             :          *                              lock(l->lock)
     116             :          * rt_mutex_unlock(l)           fixup_rt_mutex_waiters()
      117             :          *                                if (wait_list_empty(l)) {
     118             :          *                                  owner = l->owner & ~HAS_WAITERS;
     119             :          * cmpxchg(l->owner, T1, NULL)
     120             :          *  ===> Success (l->owner = NULL)
     121             :          *
     122             :          *                                  l->owner = owner
     123             :          *                                    ==> l->owner = T1
     124             :          *                                }
     125             :          *
     126             :          * With the check for the waiter bit in place T3 on CPU2 will not
     127             :          * overwrite. All tasks fiddling with the waiters bit are
     128             :          * serialized by l->lock, so nothing else can modify the waiters
     129             :          * bit. If the bit is set then nothing can change l->owner either
     130             :          * so the simple RMW is safe. The cmpxchg() will simply fail if it
     131             :          * happens in the middle of the RMW because the waiters bit is
     132             :          * still set.
     133             :          */
     134           0 :         owner = READ_ONCE(*p);
     135           0 :         if (owner & RT_MUTEX_HAS_WAITERS)
     136           0 :                 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
     137             : }
     138             : 
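
As the comments in try_to_take_rt_mutex() below put it, a caller that does a
trylock or leaves the lock function due to a signal or timeout must repair the
transient waiters bit itself. A hedged sketch of that caller pattern, under
wait_lock, with an illustrative function name (the real slow-path functions are
not shown in this excerpt):

        /* Sketch only: abort an acquisition attempt and repair the waiters bit. */
        static void example_abort_lock_attempt(struct rt_mutex *lock)
        {
                unsigned long flags;

                raw_spin_lock_irqsave(&lock->wait_lock, flags);
                /* ... dequeue our own waiter, give up on the lock ... */
                fixup_rt_mutex_waiters(lock);
                raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        }
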
     139             : /*
      140             :  * We can speed up the acquire/release if there's no debugging state to be
     141             :  * set up.
     142             :  */
     143             : #ifndef CONFIG_DEBUG_RT_MUTEXES
     144             : # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
     145             : # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
     146             : 
     147             : /*
     148             :  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
     149             :  * all future threads that attempt to [Rmw] the lock to the slowpath. As such
     150             :  * relaxed semantics suffice.
     151             :  */
     152             : static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
     153             : {
     154             :         unsigned long owner, *p = (unsigned long *) &lock->owner;
     155             : 
     156             :         do {
     157             :                 owner = *p;
     158             :         } while (cmpxchg_relaxed(p, owner,
     159             :                                  owner | RT_MUTEX_HAS_WAITERS) != owner);
     160             : }
     161             : 
     162             : /*
     163             :  * Safe fastpath aware unlock:
     164             :  * 1) Clear the waiters bit
     165             :  * 2) Drop lock->wait_lock
     166             :  * 3) Try to unlock the lock with cmpxchg
     167             :  */
     168             : static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
     169             :                                         unsigned long flags)
     170             :         __releases(lock->wait_lock)
     171             : {
     172             :         struct task_struct *owner = rt_mutex_owner(lock);
     173             : 
     174             :         clear_rt_mutex_waiters(lock);
     175             :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
     176             :         /*
     177             :          * If a new waiter comes in between the unlock and the cmpxchg
     178             :          * we have two situations:
     179             :          *
     180             :          * unlock(wait_lock);
     181             :          *                                      lock(wait_lock);
     182             :          * cmpxchg(p, owner, 0) == owner
     183             :          *                                      mark_rt_mutex_waiters(lock);
     184             :          *                                      acquire(lock);
     185             :          * or:
     186             :          *
     187             :          * unlock(wait_lock);
     188             :          *                                      lock(wait_lock);
     189             :          *                                      mark_rt_mutex_waiters(lock);
     190             :          *
     191             :          * cmpxchg(p, owner, 0) != owner
     192             :          *                                      enqueue_waiter();
     193             :          *                                      unlock(wait_lock);
     194             :          * lock(wait_lock);
     195             :          * wake waiter();
     196             :          * unlock(wait_lock);
     197             :          *                                      lock(wait_lock);
     198             :          *                                      acquire(lock);
     199             :          */
     200             :         return rt_mutex_cmpxchg_release(lock, owner, NULL);
     201             : }
     202             : 
     203             : #else
     204             : # define rt_mutex_cmpxchg_acquire(l,c,n)        (0)
     205             : # define rt_mutex_cmpxchg_release(l,c,n)        (0)
     206             : 
     207           0 : static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
     208             : {
     209           0 :         lock->owner = (struct task_struct *)
     210           0 :                         ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
     211             : }
     212             : 
     213             : /*
     214             :  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
     215             :  */
     216           0 : static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
     217             :                                         unsigned long flags)
     218             :         __releases(lock->wait_lock)
     219             : {
     220           0 :         lock->owner = NULL;
     221           0 :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
     222           0 :         return true;
     223             : }
     224             : #endif
     225             : 
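
Fast paths built on these two helpers are expected to follow the usual
compare-and-swap pattern; a minimal sketch with illustrative names (these are
not the exact fast-path helpers of this file). In the debug configuration the
macros are hard-wired to 0, so both sketches simply fail and the caller falls
back to the wait_lock-protected slow path:

        /* Sketch: uncontended trylock -- swing owner from NULL to current. */
        static inline bool example_fast_trylock(struct rt_mutex *lock)
        {
                return rt_mutex_cmpxchg_acquire(lock, NULL, current);
        }

        /* Sketch: fast unlock -- fails once the waiters bit is set. */
        static inline bool example_fast_unlock(struct rt_mutex *lock)
        {
                return rt_mutex_cmpxchg_release(lock, current, NULL);
        }
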
     226             : /*
     227             :  * Only use with rt_mutex_waiter_{less,equal}()
     228             :  */
     229             : #define task_to_waiter(p)       \
     230             :         &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
     231             : 
     232             : static inline int
     233           0 : rt_mutex_waiter_less(struct rt_mutex_waiter *left,
     234             :                      struct rt_mutex_waiter *right)
     235             : {
     236           0 :         if (left->prio < right->prio)
     237             :                 return 1;
     238             : 
     239             :         /*
     240             :          * If both waiters have dl_prio(), we check the deadlines of the
     241             :          * associated tasks.
     242             :          * If left waiter has a dl_prio(), and we didn't return 1 above,
     243             :          * then right waiter has a dl_prio() too.
     244             :          */
     245           0 :         if (dl_prio(left->prio))
     246           0 :                 return dl_time_before(left->deadline, right->deadline);
     247             : 
     248             :         return 0;
     249             : }
     250             : 
     251             : static inline int
     252           0 : rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
     253             :                       struct rt_mutex_waiter *right)
     254             : {
     255           0 :         if (left->prio != right->prio)
     256             :                 return 0;
     257             : 
     258             :         /*
     259             :          * If both waiters have dl_prio(), we check the deadlines of the
     260             :          * associated tasks.
     261             :          * If left waiter has a dl_prio(), and we didn't return 0 above,
     262             :          * then right waiter has a dl_prio() too.
     263             :          */
     264           0 :         if (dl_prio(left->prio))
     265           0 :                 return left->deadline == right->deadline;
     266             : 
     267             :         return 1;
     268             : }
     269             : 
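
A small worked example of the ordering defined by the two helpers above, in the
on-stack task_to_waiter() style (the prio values are made up kernel-internal
priorities, where a numerically lower value wins; deadline-class waiters tie on
prio and fall through to the dl_time_before() comparison):

        /* Illustration only: two RT waiters compare by kernel prio. */
        static void __maybe_unused example_waiter_ordering(void)
        {
                struct rt_mutex_waiter *a = &(struct rt_mutex_waiter){ .prio = 10 };
                struct rt_mutex_waiter *b = &(struct rt_mutex_waiter){ .prio = 20 };

                WARN_ON(!rt_mutex_waiter_less(a, b));   /* a orders before b */
                WARN_ON(rt_mutex_waiter_equal(a, b));   /* different prio, not equal */
        }
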
     270             : #define __node_2_waiter(node) \
     271             :         rb_entry((node), struct rt_mutex_waiter, tree_entry)
     272             : 
     273           0 : static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
     274             : {
     275           0 :         return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
     276             : }
     277             : 
     278             : static void
     279           0 : rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
     280             : {
     281           0 :         rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
     282           0 : }
     283             : 
     284             : static void
     285           0 : rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
     286             : {
     287           0 :         if (RB_EMPTY_NODE(&waiter->tree_entry))
     288             :                 return;
     289             : 
     290           0 :         rb_erase_cached(&waiter->tree_entry, &lock->waiters);
     291           0 :         RB_CLEAR_NODE(&waiter->tree_entry);
     292             : }
     293             : 
     294             : #define __node_2_pi_waiter(node) \
     295             :         rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
     296             : 
     297           0 : static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
     298             : {
     299           0 :         return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
     300             : }
     301             : 
     302             : static void
     303           0 : rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
     304             : {
     305           0 :         rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
     306           0 : }
     307             : 
     308             : static void
     309           0 : rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
     310             : {
     311           0 :         if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
     312             :                 return;
     313             : 
     314           0 :         rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
     315           0 :         RB_CLEAR_NODE(&waiter->pi_tree_entry);
     316             : }
     317             : 
     318           0 : static void rt_mutex_adjust_prio(struct task_struct *p)
     319             : {
     320           0 :         struct task_struct *pi_task = NULL;
     321             : 
     322           0 :         lockdep_assert_held(&p->pi_lock);
     323             : 
     324           0 :         if (task_has_pi_waiters(p))
     325           0 :                 pi_task = task_top_pi_waiter(p)->task;
     326             : 
     327           0 :         rt_mutex_setprio(p, pi_task);
     328           0 : }
     329             : 
     330             : /*
     331             :  * Deadlock detection is conditional:
     332             :  *
     333             :  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
     334             :  * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
     335             :  *
     336             :  * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
     337             :  * conducted independent of the detect argument.
     338             :  *
     339             :  * If the waiter argument is NULL this indicates the deboost path and
     340             :  * deadlock detection is disabled independent of the detect argument
     341             :  * and the config settings.
     342             :  */
     343           0 : static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
     344             :                                           enum rtmutex_chainwalk chwalk)
     345             : {
     346             :         /*
     347             :          * This is just a wrapper function for the following call,
     348             :          * because debug_rt_mutex_detect_deadlock() smells like a magic
     349             :          * debug feature and I wanted to keep the cond function in the
     350             :          * main source file along with the comments instead of having
     351             :          * two of the same in the headers.
     352             :          */
     353           0 :         return debug_rt_mutex_detect_deadlock(waiter, chwalk);
     354             : }
     355             : 
     356             : /*
     357             :  * Max number of times we'll walk the boosting chain:
     358             :  */
     359             : int max_lock_depth = 1024;
     360             : 
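
On mainline kernels this limit is exposed as a sysctl (kernel.max_lock_depth),
so an administrator can raise it for unusually deep lock chains;
rt_mutex_adjust_prio_chain() below aborts with -EDEADLK once an invocation
exceeds it.
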
     361           0 : static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
     362             : {
     363           0 :         return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
     364             : }
     365             : 
     366             : /*
     367             :  * Adjust the priority chain. Also used for deadlock detection.
     368             :  * Decreases task's usage by one - may thus free the task.
     369             :  *
     370             :  * @task:       the task owning the mutex (owner) for which a chain walk is
     371             :  *              probably needed
     372             :  * @chwalk:     do we have to carry out deadlock detection?
     373             :  * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
     374             :  *              things for a task that has just got its priority adjusted, and
     375             :  *              is waiting on a mutex)
     376             :  * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
     377             :  *              we dropped its pi_lock. Is never dereferenced, only used for
     378             :  *              comparison to detect lock chain changes.
     379             :  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
     380             :  *              its priority to the mutex owner (can be NULL in the case
      381             :  *              depicted above or if the top waiter has gone away and we are
     382             :  *              actually deboosting the owner)
     383             :  * @top_task:   the current top waiter
     384             :  *
     385             :  * Returns 0 or -EDEADLK.
     386             :  *
     387             :  * Chain walk basics and protection scope
     388             :  *
     389             :  * [R] refcount on task
     390             :  * [P] task->pi_lock held
     391             :  * [L] rtmutex->wait_lock held
     392             :  *
     393             :  * Step Description                             Protected by
     394             :  *      function arguments:
     395             :  *      @task                                   [R]
     396             :  *      @orig_lock if != NULL                   @top_task is blocked on it
     397             :  *      @next_lock                              Unprotected. Cannot be
     398             :  *                                              dereferenced. Only used for
     399             :  *                                              comparison.
     400             :  *      @orig_waiter if != NULL                 @top_task is blocked on it
     401             :  *      @top_task                               current, or in case of proxy
     402             :  *                                              locking protected by calling
     403             :  *                                              code
     404             :  *      again:
     405             :  *        loop_sanity_check();
     406             :  *      retry:
     407             :  * [1]    lock(task->pi_lock);                       [R] acquire [P]
     408             :  * [2]    waiter = task->pi_blocked_on;              [P]
     409             :  * [3]    check_exit_conditions_1();            [P]
     410             :  * [4]    lock = waiter->lock;                       [P]
     411             :  * [5]    if (!try_lock(lock->wait_lock)) {  [P] try to acquire [L]
     412             :  *          unlock(task->pi_lock);           release [P]
     413             :  *          goto retry;
     414             :  *        }
     415             :  * [6]    check_exit_conditions_2();            [P] + [L]
     416             :  * [7]    requeue_lock_waiter(lock, waiter);    [P] + [L]
     417             :  * [8]    unlock(task->pi_lock);             release [P]
     418             :  *        put_task_struct(task);                release [R]
     419             :  * [9]    check_exit_conditions_3();            [L]
     420             :  * [10]   task = owner(lock);                   [L]
     421             :  *        get_task_struct(task);                [L] acquire [R]
     422             :  *        lock(task->pi_lock);                       [L] acquire [P]
     423             :  * [11]   requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
     424             :  * [12]   check_exit_conditions_4();            [P] + [L]
     425             :  * [13]   unlock(task->pi_lock);             release [P]
     426             :  *        unlock(lock->wait_lock);           release [L]
     427             :  *        goto again;
     428             :  */
     429           0 : static int rt_mutex_adjust_prio_chain(struct task_struct *task,
     430             :                                       enum rtmutex_chainwalk chwalk,
     431             :                                       struct rt_mutex *orig_lock,
     432             :                                       struct rt_mutex *next_lock,
     433             :                                       struct rt_mutex_waiter *orig_waiter,
     434             :                                       struct task_struct *top_task)
     435             : {
     436           0 :         struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
     437           0 :         struct rt_mutex_waiter *prerequeue_top_waiter;
     438           0 :         int ret = 0, depth = 0;
     439           0 :         struct rt_mutex *lock;
     440           0 :         bool detect_deadlock;
     441           0 :         bool requeue = true;
     442             : 
     443           0 :         detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
     444             : 
     445             :         /*
      446             :          * The (de)boosting is a step-by-step approach with a lot of
      447             :          * pitfalls. We want this to be preemptible and we want to hold a
     448             :          * maximum of two locks per step. So we have to check
     449             :          * carefully whether things change under us.
     450             :          */
     451             :  again:
     452             :         /*
     453             :          * We limit the lock chain length for each invocation.
     454             :          */
     455           0 :         if (++depth > max_lock_depth) {
     456           0 :                 static int prev_max;
     457             : 
     458             :                 /*
     459             :                  * Print this only once. If the admin changes the limit,
     460             :                  * print a new message when reaching the limit again.
     461             :                  */
     462           0 :                 if (prev_max != max_lock_depth) {
     463           0 :                         prev_max = max_lock_depth;
     464           0 :                         printk(KERN_WARNING "Maximum lock depth %d reached "
     465             :                                "task: %s (%d)\n", max_lock_depth,
     466           0 :                                top_task->comm, task_pid_nr(top_task));
     467             :                 }
     468           0 :                 put_task_struct(task);
     469             : 
     470           0 :                 return -EDEADLK;
     471             :         }
     472             : 
     473             :         /*
     474             :          * We are fully preemptible here and only hold the refcount on
     475             :          * @task. So everything can have changed under us since the
     476             :          * caller or our own code below (goto retry/again) dropped all
     477             :          * locks.
     478             :          */
     479           0 :  retry:
     480             :         /*
      481             :          * [1] Task cannot go away as we did a get_task_struct() before!
     482             :          */
     483           0 :         raw_spin_lock_irq(&task->pi_lock);
     484             : 
     485             :         /*
      486             :          * [2] Get the waiter on which @task is blocked.
     487             :          */
     488           0 :         waiter = task->pi_blocked_on;
     489             : 
     490             :         /*
     491             :          * [3] check_exit_conditions_1() protected by task->pi_lock.
     492             :          */
     493             : 
     494             :         /*
     495             :          * Check whether the end of the boosting chain has been
     496             :          * reached or the state of the chain has changed while we
     497             :          * dropped the locks.
     498             :          */
     499           0 :         if (!waiter)
     500           0 :                 goto out_unlock_pi;
     501             : 
     502             :         /*
     503             :          * Check the orig_waiter state. After we dropped the locks,
     504             :          * the previous owner of the lock might have released the lock.
     505             :          */
     506           0 :         if (orig_waiter && !rt_mutex_owner(orig_lock))
     507           0 :                 goto out_unlock_pi;
     508             : 
     509             :         /*
     510             :          * We dropped all locks after taking a refcount on @task, so
     511             :          * the task might have moved on in the lock chain or even left
     512             :          * the chain completely and blocks now on an unrelated lock or
     513             :          * on @orig_lock.
     514             :          *
     515             :          * We stored the lock on which @task was blocked in @next_lock,
     516             :          * so we can detect the chain change.
     517             :          */
     518           0 :         if (next_lock != waiter->lock)
     519           0 :                 goto out_unlock_pi;
     520             : 
     521             :         /*
      522             :          * Drop out when the task has no waiters. Note that
      523             :          * top_waiter can be NULL when we are in the deboosting
      524             :          * mode!
     525             :          */
     526           0 :         if (top_waiter) {
     527           0 :                 if (!task_has_pi_waiters(task))
     528           0 :                         goto out_unlock_pi;
     529             :                 /*
     530             :                  * If deadlock detection is off, we stop here if we
     531             :                  * are not the top pi waiter of the task. If deadlock
     532             :                  * detection is enabled we continue, but stop the
     533             :                  * requeueing in the chain walk.
     534             :                  */
     535           0 :                 if (top_waiter != task_top_pi_waiter(task)) {
     536           0 :                         if (!detect_deadlock)
     537           0 :                                 goto out_unlock_pi;
     538             :                         else
     539             :                                 requeue = false;
     540             :                 }
     541             :         }
     542             : 
     543             :         /*
     544             :          * If the waiter priority is the same as the task priority
     545             :          * then there is no further priority adjustment necessary.  If
      546             :          * deadlock detection is off, we stop the chain walk. If it's
     547             :          * enabled we continue, but stop the requeueing in the chain
     548             :          * walk.
     549             :          */
     550           0 :         if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
     551           0 :                 if (!detect_deadlock)
     552           0 :                         goto out_unlock_pi;
     553             :                 else
     554             :                         requeue = false;
     555             :         }
     556             : 
     557             :         /*
     558             :          * [4] Get the next lock
     559             :          */
     560           0 :         lock = waiter->lock;
     561             :         /*
     562             :          * [5] We need to trylock here as we are holding task->pi_lock,
     563             :          * which is the reverse lock order versus the other rtmutex
     564             :          * operations.
     565             :          */
     566           0 :         if (!raw_spin_trylock(&lock->wait_lock)) {
     567           0 :                 raw_spin_unlock_irq(&task->pi_lock);
     568           0 :                 cpu_relax();
     569           0 :                 goto retry;
     570             :         }
     571             : 
     572             :         /*
     573             :          * [6] check_exit_conditions_2() protected by task->pi_lock and
     574             :          * lock->wait_lock.
     575             :          *
     576             :          * Deadlock detection. If the lock is the same as the original
     577             :          * lock which caused us to walk the lock chain or if the
     578             :          * current lock is owned by the task which initiated the chain
     579             :          * walk, we detected a deadlock.
     580             :          */
     581           0 :         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
     582           0 :                 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
     583           0 :                 raw_spin_unlock(&lock->wait_lock);
     584           0 :                 ret = -EDEADLK;
     585           0 :                 goto out_unlock_pi;
     586             :         }
     587             : 
     588             :         /*
     589             :          * If we just follow the lock chain for deadlock detection, no
     590             :          * need to do all the requeue operations. To avoid a truckload
     591             :          * of conditionals around the various places below, just do the
     592             :          * minimum chain walk checks.
     593             :          */
     594           0 :         if (!requeue) {
     595             :                 /*
      596             :                  * No requeue [7] here. Just release @task [8].
     597             :                  */
     598           0 :                 raw_spin_unlock(&task->pi_lock);
     599           0 :                 put_task_struct(task);
     600             : 
     601             :                 /*
     602             :                  * [9] check_exit_conditions_3 protected by lock->wait_lock.
     603             :                  * If there is no owner of the lock, end of chain.
     604             :                  */
     605           0 :                 if (!rt_mutex_owner(lock)) {
     606           0 :                         raw_spin_unlock_irq(&lock->wait_lock);
     607           0 :                         return 0;
     608             :                 }
     609             : 
     610             :                 /* [10] Grab the next task, i.e. owner of @lock */
     611           0 :                 task = get_task_struct(rt_mutex_owner(lock));
     612           0 :                 raw_spin_lock(&task->pi_lock);
     613             : 
     614             :                 /*
     615             :                  * No requeue [11] here. We just do deadlock detection.
     616             :                  *
     617             :                  * [12] Store whether owner is blocked
     618             :                  * itself. Decision is made after dropping the locks
     619             :                  */
     620           0 :                 next_lock = task_blocked_on_lock(task);
     621             :                 /*
     622             :                  * Get the top waiter for the next iteration
     623             :                  */
     624           0 :                 top_waiter = rt_mutex_top_waiter(lock);
     625             : 
     626             :                 /* [13] Drop locks */
     627           0 :                 raw_spin_unlock(&task->pi_lock);
     628           0 :                 raw_spin_unlock_irq(&lock->wait_lock);
     629             : 
     630             :                 /* If owner is not blocked, end of chain. */
     631           0 :                 if (!next_lock)
     632           0 :                         goto out_put_task;
     633           0 :                 goto again;
     634             :         }
     635             : 
     636             :         /*
     637             :          * Store the current top waiter before doing the requeue
     638             :          * operation on @lock. We need it for the boost/deboost
     639             :          * decision below.
     640             :          */
     641           0 :         prerequeue_top_waiter = rt_mutex_top_waiter(lock);
     642             : 
     643             :         /* [7] Requeue the waiter in the lock waiter tree. */
     644           0 :         rt_mutex_dequeue(lock, waiter);
     645             : 
     646             :         /*
     647             :          * Update the waiter prio fields now that we're dequeued.
     648             :          *
     649             :          * These values can have changed through either:
     650             :          *
     651             :          *   sys_sched_set_scheduler() / sys_sched_setattr()
     652             :          *
     653             :          * or
     654             :          *
     655             :          *   DL CBS enforcement advancing the effective deadline.
     656             :          *
     657             :          * Even though pi_waiters also uses these fields, and that tree is only
     658             :          * updated in [11], we can do this here, since we hold [L], which
     659             :          * serializes all pi_waiters access and rb_erase() does not care about
     660             :          * the values of the node being removed.
     661             :          */
     662           0 :         waiter->prio = task->prio;
     663           0 :         waiter->deadline = task->dl.deadline;
     664             : 
     665           0 :         rt_mutex_enqueue(lock, waiter);
     666             : 
     667             :         /* [8] Release the task */
     668           0 :         raw_spin_unlock(&task->pi_lock);
     669           0 :         put_task_struct(task);
     670             : 
     671             :         /*
     672             :          * [9] check_exit_conditions_3 protected by lock->wait_lock.
     673             :          *
     674             :          * We must abort the chain walk if there is no lock owner even
      675             :          * in the deadlock detection case, as we have nothing to
     676             :          * follow here. This is the end of the chain we are walking.
     677             :          */
     678           0 :         if (!rt_mutex_owner(lock)) {
     679             :                 /*
     680             :                  * If the requeue [7] above changed the top waiter,
     681             :                  * then we need to wake the new top waiter up to try
     682             :                  * to get the lock.
     683             :                  */
     684           0 :                 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
     685           0 :                         wake_up_process(rt_mutex_top_waiter(lock)->task);
     686           0 :                 raw_spin_unlock_irq(&lock->wait_lock);
     687           0 :                 return 0;
     688             :         }
     689             : 
     690             :         /* [10] Grab the next task, i.e. the owner of @lock */
     691           0 :         task = get_task_struct(rt_mutex_owner(lock));
     692           0 :         raw_spin_lock(&task->pi_lock);
     693             : 
     694             :         /* [11] requeue the pi waiters if necessary */
     695           0 :         if (waiter == rt_mutex_top_waiter(lock)) {
     696             :                 /*
     697             :                  * The waiter became the new top (highest priority)
     698             :                  * waiter on the lock. Replace the previous top waiter
      699             :                  * in the owner task's pi waiters tree with this waiter
     700             :                  * and adjust the priority of the owner.
     701             :                  */
     702           0 :                 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
     703           0 :                 rt_mutex_enqueue_pi(task, waiter);
     704           0 :                 rt_mutex_adjust_prio(task);
     705             : 
     706           0 :         } else if (prerequeue_top_waiter == waiter) {
     707             :                 /*
     708             :                  * The waiter was the top waiter on the lock, but is
      709             :                  * no longer the top priority waiter. Replace waiter in
      710             :                  * the owner task's pi waiters tree with the new top
     711             :                  * (highest priority) waiter and adjust the priority
     712             :                  * of the owner.
     713             :                  * The new top waiter is stored in @waiter so that
     714             :                  * @waiter == @top_waiter evaluates to true below and
     715             :                  * we continue to deboost the rest of the chain.
     716             :                  */
     717           0 :                 rt_mutex_dequeue_pi(task, waiter);
     718           0 :                 waiter = rt_mutex_top_waiter(lock);
     719           0 :                 rt_mutex_enqueue_pi(task, waiter);
     720           0 :                 rt_mutex_adjust_prio(task);
     721             :         } else {
     722             :                 /*
     723             :                  * Nothing changed. No need to do any priority
     724             :                  * adjustment.
     725             :                  */
     726           0 :         }
     727             : 
     728             :         /*
     729             :          * [12] check_exit_conditions_4() protected by task->pi_lock
     730             :          * and lock->wait_lock. The actual decisions are made after we
     731             :          * dropped the locks.
     732             :          *
     733             :          * Check whether the task which owns the current lock is pi
     734             :          * blocked itself. If yes we store a pointer to the lock for
     735             :          * the lock chain change detection above. After we dropped
     736             :          * task->pi_lock next_lock cannot be dereferenced anymore.
     737             :          */
     738           0 :         next_lock = task_blocked_on_lock(task);
     739             :         /*
     740             :          * Store the top waiter of @lock for the end of chain walk
     741             :          * decision below.
     742             :          */
     743           0 :         top_waiter = rt_mutex_top_waiter(lock);
     744             : 
     745             :         /* [13] Drop the locks */
     746           0 :         raw_spin_unlock(&task->pi_lock);
     747           0 :         raw_spin_unlock_irq(&lock->wait_lock);
     748             : 
     749             :         /*
     750             :          * Make the actual exit decisions [12], based on the stored
     751             :          * values.
     752             :          *
     753             :          * We reached the end of the lock chain. Stop right here. No
     754             :          * point to go back just to figure that out.
     755             :          */
     756           0 :         if (!next_lock)
     757           0 :                 goto out_put_task;
     758             : 
     759             :         /*
     760             :          * If the current waiter is not the top waiter on the lock,
     761             :          * then we can stop the chain walk here if we are not in full
     762             :          * deadlock detection mode.
     763             :          */
     764           0 :         if (!detect_deadlock && waiter != top_waiter)
     765           0 :                 goto out_put_task;
     766             : 
     767           0 :         goto again;
     768             : 
     769           0 :  out_unlock_pi:
     770           0 :         raw_spin_unlock_irq(&task->pi_lock);
     771           0 :  out_put_task:
     772           0 :         put_task_struct(task);
     773             : 
     774           0 :         return ret;
     775             : }
     776             : 
     777             : /*
     778             :  * Try to take an rt-mutex
     779             :  *
     780             :  * Must be called with lock->wait_lock held and interrupts disabled
     781             :  *
     782             :  * @lock:   The lock to be acquired.
     783             :  * @task:   The task which wants to acquire the lock
     784             :  * @waiter: The waiter that is queued to the lock's wait tree if the
     785             :  *          callsite called task_blocked_on_lock(), otherwise NULL
     786             :  */
     787           0 : static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
     788             :                                 struct rt_mutex_waiter *waiter)
     789             : {
     790           0 :         lockdep_assert_held(&lock->wait_lock);
     791             : 
     792             :         /*
     793             :          * Before testing whether we can acquire @lock, we set the
     794             :          * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
     795             :          * other tasks which try to modify @lock into the slow path
     796             :          * and they serialize on @lock->wait_lock.
     797             :          *
     798             :          * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
     799             :          * as explained at the top of this file if and only if:
     800             :          *
     801             :          * - There is a lock owner. The caller must fixup the
     802             :          *   transient state if it does a trylock or leaves the lock
     803             :          *   function due to a signal or timeout.
     804             :          *
     805             :          * - @task acquires the lock and there are no other
     806             :          *   waiters. This is undone in rt_mutex_set_owner(@task) at
     807             :          *   the end of this function.
     808             :          */
     809           0 :         mark_rt_mutex_waiters(lock);
     810             : 
     811             :         /*
     812             :          * If @lock has an owner, give up.
     813             :          */
     814           0 :         if (rt_mutex_owner(lock))
     815             :                 return 0;
     816             : 
     817             :         /*
     818             :          * If @waiter != NULL, @task has already enqueued the waiter
     819             :          * into @lock waiter tree. If @waiter == NULL then this is a
     820             :          * trylock attempt.
     821             :          */
     822           0 :         if (waiter) {
     823             :                 /*
     824             :                  * If waiter is not the highest priority waiter of
     825             :                  * @lock, give up.
     826             :                  */
     827           0 :                 if (waiter != rt_mutex_top_waiter(lock))
     828             :                         return 0;
     829             : 
     830             :                 /*
     831             :                  * We can acquire the lock. Remove the waiter from the
     832             :                  * lock waiters tree.
     833             :                  */
     834           0 :                 rt_mutex_dequeue(lock, waiter);
     835             : 
     836             :         } else {
     837             :                 /*
      838             :                  * If the lock already has waiters, we check whether @task is
     839             :                  * eligible to take over the lock.
     840             :                  *
     841             :                  * If there are no other waiters, @task can acquire
     842             :                  * the lock.  @task->pi_blocked_on is NULL, so it does
     843             :                  * not need to be dequeued.
     844             :                  */
     845           0 :                 if (rt_mutex_has_waiters(lock)) {
     846             :                         /*
     847             :                          * If @task->prio is greater than or equal to
     848             :                          * the top waiter priority (kernel view),
     849             :                          * @task lost.
     850             :                          */
     851           0 :                         if (!rt_mutex_waiter_less(task_to_waiter(task),
     852             :                                                   rt_mutex_top_waiter(lock)))
     853             :                                 return 0;
     854             : 
     855             :                         /*
     856             :                          * The current top waiter stays enqueued. We
     857             :                          * don't have to change anything in the lock
     858             :                          * waiters order.
     859             :                          */
     860             :                 } else {
     861             :                         /*
     862             :                          * No waiters. Take the lock without the
      863             :                          * pi_lock dance. @task->pi_blocked_on is NULL
      864             :                          * and we have no waiters to enqueue in @task's
      865             :                          * pi waiters tree.
     866             :                          */
     867           0 :                         goto takeit;
     868             :                 }
     869             :         }
     870             : 
     871             :         /*
     872             :          * Clear @task->pi_blocked_on. Requires protection by
     873             :          * @task->pi_lock. Redundant operation for the @waiter == NULL
     874             :          * case, but conditionals are more expensive than a redundant
     875             :          * store.
     876             :          */
     877           0 :         raw_spin_lock(&task->pi_lock);
     878           0 :         task->pi_blocked_on = NULL;
     879             :         /*
     880             :          * Finish the lock acquisition. @task is the new owner. If
     881             :          * other waiters exist we have to insert the highest priority
     882             :          * waiter into @task->pi_waiters tree.
     883             :          */
     884           0 :         if (rt_mutex_has_waiters(lock))
     885           0 :                 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
     886           0 :         raw_spin_unlock(&task->pi_lock);
     887             : 
     888           0 : takeit:
     889             :         /* We got the lock. */
     890           0 :         debug_rt_mutex_lock(lock);
     891             : 
     892             :         /*
     893             :          * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
     894             :          * are still waiters or clears it.
     895             :          */
     896           0 :         rt_mutex_set_owner(lock, task);
     897             : 
     898           0 :         return 1;
     899             : }
     900             : 
     901             : /*
     902             :  * Task blocks on lock.
     903             :  *
     904             :  * Prepare waiter and propagate pi chain
     905             :  *
     906             :  * This must be called with lock->wait_lock held and interrupts disabled
     907             :  */
     908           0 : static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
     909             :                                    struct rt_mutex_waiter *waiter,
     910             :                                    struct task_struct *task,
     911             :                                    enum rtmutex_chainwalk chwalk)
     912             : {
     913           0 :         struct task_struct *owner = rt_mutex_owner(lock);
     914           0 :         struct rt_mutex_waiter *top_waiter = waiter;
     915           0 :         struct rt_mutex *next_lock;
     916           0 :         int chain_walk = 0, res;
     917             : 
     918           0 :         lockdep_assert_held(&lock->wait_lock);
     919             : 
     920             :         /*
     921             :          * Early deadlock detection. We really don't want the task to
     922             :          * enqueue on itself just to untangle the mess later. It's not
     923             :          * only an optimization. We drop the locks, so another waiter
     924             :          * can come in before the chain walk detects the deadlock. So
     925             :          * the other will detect the deadlock and return -EDEADLOCK,
     926             :          * which is wrong, as the other waiter is not in a deadlock
     927             :          * situation.
     928             :          */
     929           0 :         if (owner == task)
     930             :                 return -EDEADLK;
     931             : 
     932           0 :         raw_spin_lock(&task->pi_lock);
     933           0 :         waiter->task = task;
     934           0 :         waiter->lock = lock;
     935           0 :         waiter->prio = task->prio;
     936           0 :         waiter->deadline = task->dl.deadline;
     937             : 
     938             :         /* Get the top priority waiter on the lock */
     939           0 :         if (rt_mutex_has_waiters(lock))
     940           0 :                 top_waiter = rt_mutex_top_waiter(lock);
     941           0 :         rt_mutex_enqueue(lock, waiter);
     942             : 
     943           0 :         task->pi_blocked_on = waiter;
     944             : 
     945           0 :         raw_spin_unlock(&task->pi_lock);
     946             : 
     947           0 :         if (!owner)
     948             :                 return 0;
     949             : 
     950           0 :         raw_spin_lock(&owner->pi_lock);
     951           0 :         if (waiter == rt_mutex_top_waiter(lock)) {
     952           0 :                 rt_mutex_dequeue_pi(owner, top_waiter);
     953           0 :                 rt_mutex_enqueue_pi(owner, waiter);
     954             : 
     955           0 :                 rt_mutex_adjust_prio(owner);
     956           0 :                 if (owner->pi_blocked_on)
     957           0 :                         chain_walk = 1;
     958           0 :         } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
     959           0 :                 chain_walk = 1;
     960             :         }
     961             : 
     962             :         /* Store the lock on which owner is blocked or NULL */
     963           0 :         next_lock = task_blocked_on_lock(owner);
     964             : 
     965           0 :         raw_spin_unlock(&owner->pi_lock);
     966             :         /*
     967             :          * Even if full deadlock detection is on, if the owner is not
     968             :          * blocked itself, we can avoid finding this out in the chain
     969             :          * walk.
     970             :          */
     971           0 :         if (!chain_walk || !next_lock)
     972             :                 return 0;
     973             : 
     974             :         /*
     975             :          * The owner can't disappear while holding a lock,
     976             :          * so the owner struct is protected by wait_lock.
     977             :          * Gets dropped in rt_mutex_adjust_prio_chain()!
     978             :          */
     979           0 :         get_task_struct(owner);
     980             : 
     981           0 :         raw_spin_unlock_irq(&lock->wait_lock);
     982             : 
     983           0 :         res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
     984             :                                          next_lock, waiter, task);
     985             : 
     986           0 :         raw_spin_lock_irq(&lock->wait_lock);
     987             : 
     988           0 :         return res;
     989             : }
     990             : 
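The boost step above is easier to follow with the rb-tree and pi_lock plumbing stripped away. The following standalone sketch (toy types, not the kernel's task_struct or waiter trees; lower numeric value means higher priority, matching the kernel's prio convention) models the rule task_blocks_on_rt_mutex() and rt_mutex_adjust_prio() apply when a new top waiter shows up:

        /* Standalone model, not kernel code: priority-inheritance boost rule. */
        #include <stdio.h>

        struct toy_task {
                int normal_prio;        /* static priority */
                int prio;               /* effective (possibly boosted) priority */
                int top_pi_prio;        /* best prio among waiters blocked on our locks */
        };

        /* Counterpart of the rt_mutex_adjust_prio(owner) idea: effective prio is
         * the better of our own priority and the top pi-waiter's priority. */
        static void toy_adjust_prio(struct toy_task *owner)
        {
                owner->prio = owner->top_pi_prio < owner->normal_prio ?
                              owner->top_pi_prio : owner->normal_prio;
        }

        /* A waiter with priority waiter_prio blocks on a lock held by owner. */
        static void toy_block_on(struct toy_task *owner, int waiter_prio)
        {
                if (waiter_prio < owner->top_pi_prio)
                        owner->top_pi_prio = waiter_prio;       /* new top waiter */
                toy_adjust_prio(owner);                         /* boost the owner */
        }

        int main(void)
        {
                struct toy_task owner = { .normal_prio = 120, .prio = 120,
                                          .top_pi_prio = 999 /* no waiter yet */ };

                toy_block_on(&owner, 90);                         /* RT waiter arrives */
                printf("owner boosted to prio %d\n", owner.prio); /* prints 90 */
                return 0;
        }

The chain walk that the real function may trigger afterwards repeats this boost transitively along owner->pi_blocked_on, which is what rt_mutex_adjust_prio_chain() does.
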
     991             : /*
     992             :  * Remove the top waiter from the current task's pi waiter tree and
     993             :  * queue it up.
     994             :  *
     995             :  * Called with lock->wait_lock held and interrupts disabled.
     996             :  */
     997           0 : static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
     998             :                                     struct rt_mutex *lock)
     999             : {
    1000           0 :         struct rt_mutex_waiter *waiter;
    1001             : 
    1002           0 :         raw_spin_lock(&current->pi_lock);
    1003             : 
    1004           0 :         waiter = rt_mutex_top_waiter(lock);
    1005             : 
    1006             :         /*
    1007             :          * Remove it from current->pi_waiters and deboost.
    1008             :          *
    1009             :          * We must in fact deboost here in order to ensure we call
    1010             :          * rt_mutex_setprio() to update p->pi_top_task before the
    1011             :          * task unblocks.
    1012             :          */
    1013           0 :         rt_mutex_dequeue_pi(current, waiter);
    1014           0 :         rt_mutex_adjust_prio(current);
    1015             : 
    1016             :         /*
    1017             :          * As we are waking up the top waiter, and the waiter stays
    1018             :          * queued on the lock until it gets the lock, this lock
    1019             :          * obviously has waiters. Just set the bit here and this has
    1020             :          * the added benefit of forcing all new tasks into the
    1021             :          * slow path making sure no task of lower priority than
    1022             :          * the top waiter can steal this lock.
    1023             :          */
    1024           0 :         lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
    1025             : 
    1026             :         /*
    1027             :          * We deboosted before waking the top waiter task such that we don't
    1028             :          * run two tasks with the 'same' priority (and ensure the
    1029             :          * p->pi_top_task pointer points to a blocked task). This however can
    1030             :          * lead to priority inversion if we would get preempted after the
    1031             :          * deboost but before waking our donor task, hence the preempt_disable()
    1032             :          * before unlock.
    1033             :          *
    1034             :          * Pairs with preempt_enable() in rt_mutex_postunlock();
    1035             :          */
    1036           0 :         preempt_disable();
    1037           0 :         wake_q_add(wake_q, waiter->task);
    1038           0 :         raw_spin_unlock(&current->pi_lock);
    1039           0 : }
    1040             : 
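mark_wakeup_next_waiter() only queues the top waiter on a wake_q and leaves preemption disabled; the actual wake-up happens after wait_lock has been dropped (see rt_mutex_futex_unlock() and rt_mutex_postunlock() below). A minimal sketch of that caller pattern, assuming kernel context and only the wake_q helpers already pulled in via <linux/sched/wake_q.h> (example_unlock_path is an illustrative name, not a function in this file):

        static void example_unlock_path(struct rt_mutex *lock)
        {
                DEFINE_WAKE_Q(wake_q);
                unsigned long flags;

                raw_spin_lock_irqsave(&lock->wait_lock, flags);
                /* ... decide who to wake while still holding wait_lock ... */
                mark_wakeup_next_waiter(&wake_q, lock); /* also does preempt_disable() */
                raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

                wake_up_q(&wake_q);     /* wake up outside of the raw spinlock */
                preempt_enable();       /* pairs with the preempt_disable() above */
        }
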
    1041             : /*
    1042             :  * Remove a waiter from a lock and give up
    1043             :  *
    1044             :  * Must be called with lock->wait_lock held and interrupts disabled. The
    1045             :  * caller must have just failed to try_to_take_rt_mutex().
    1046             :  */
    1047           0 : static void remove_waiter(struct rt_mutex *lock,
    1048             :                           struct rt_mutex_waiter *waiter)
    1049             : {
    1050           0 :         bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
    1051           0 :         struct task_struct *owner = rt_mutex_owner(lock);
    1052           0 :         struct rt_mutex *next_lock;
    1053             : 
    1054           0 :         lockdep_assert_held(&lock->wait_lock);
    1055             : 
    1056           0 :         raw_spin_lock(&current->pi_lock);
    1057           0 :         rt_mutex_dequeue(lock, waiter);
    1058           0 :         current->pi_blocked_on = NULL;
    1059           0 :         raw_spin_unlock(&current->pi_lock);
    1060             : 
    1061             :         /*
    1062             :          * Only update priority if the waiter was the highest priority
    1063             :          * waiter of the lock and there is an owner to update.
    1064             :          */
    1065           0 :         if (!owner || !is_top_waiter)
    1066             :                 return;
    1067             : 
    1068           0 :         raw_spin_lock(&owner->pi_lock);
    1069             : 
    1070           0 :         rt_mutex_dequeue_pi(owner, waiter);
    1071             : 
    1072           0 :         if (rt_mutex_has_waiters(lock))
    1073           0 :                 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
    1074             : 
    1075           0 :         rt_mutex_adjust_prio(owner);
    1076             : 
    1077             :         /* Store the lock on which owner is blocked or NULL */
    1078           0 :         next_lock = task_blocked_on_lock(owner);
    1079             : 
    1080           0 :         raw_spin_unlock(&owner->pi_lock);
    1081             : 
    1082             :         /*
    1083             :          * Don't walk the chain, if the owner task is not blocked
    1084             :          * itself.
    1085             :          */
    1086           0 :         if (!next_lock)
    1087             :                 return;
    1088             : 
    1089             :         /* gets dropped in rt_mutex_adjust_prio_chain()! */
    1090           0 :         get_task_struct(owner);
    1091             : 
    1092           0 :         raw_spin_unlock_irq(&lock->wait_lock);
    1093             : 
    1094           0 :         rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
    1095             :                                    next_lock, NULL, current);
    1096             : 
    1097           0 :         raw_spin_lock_irq(&lock->wait_lock);
    1098             : }
    1099             : 
    1100             : /*
    1101             :  * Recheck the pi chain, in case we got a priority setting
    1102             :  *
    1103             :  * Called from sched_setscheduler
    1104             :  */
    1105           4 : void rt_mutex_adjust_pi(struct task_struct *task)
    1106             : {
    1107           4 :         struct rt_mutex_waiter *waiter;
    1108           4 :         struct rt_mutex *next_lock;
    1109           4 :         unsigned long flags;
    1110             : 
    1111           4 :         raw_spin_lock_irqsave(&task->pi_lock, flags);
    1112             : 
    1113           4 :         waiter = task->pi_blocked_on;
    1114           4 :         if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
    1115           4 :                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
    1116           4 :                 return;
    1117             :         }
    1118           0 :         next_lock = waiter->lock;
    1119           0 :         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
    1120             : 
    1121             :         /* gets dropped in rt_mutex_adjust_prio_chain()! */
    1122           0 :         get_task_struct(task);
    1123             : 
    1124           0 :         rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
    1125             :                                    next_lock, NULL, task);
    1126             : }
    1127             : 
    1128           0 : void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
    1129             : {
    1130           0 :         debug_rt_mutex_init_waiter(waiter);
    1131           0 :         RB_CLEAR_NODE(&waiter->pi_tree_entry);
    1132           0 :         RB_CLEAR_NODE(&waiter->tree_entry);
    1133           0 :         waiter->task = NULL;
    1134           0 : }
    1135             : 
    1136             : /**
    1137             :  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
    1138             :  * @lock:                the rt_mutex to take
    1139             :  * @state:               the state the task should block in (TASK_INTERRUPTIBLE
    1140             :  *                       or TASK_UNINTERRUPTIBLE)
    1141             :  * @timeout:             the pre-initialized and started timer, or NULL for none
    1142             :  * @waiter:              the pre-initialized rt_mutex_waiter
    1143             :  *
    1144             :  * Must be called with lock->wait_lock held and interrupts disabled
    1145             :  */
    1146             : static int __sched
    1147           0 : __rt_mutex_slowlock(struct rt_mutex *lock, int state,
    1148             :                     struct hrtimer_sleeper *timeout,
    1149             :                     struct rt_mutex_waiter *waiter)
    1150             : {
    1151           0 :         int ret = 0;
    1152             : 
    1153           0 :         for (;;) {
    1154             :                 /* Try to acquire the lock: */
    1155           0 :                 if (try_to_take_rt_mutex(lock, current, waiter))
    1156             :                         break;
    1157             : 
    1158             :                 /*
    1159             :                  * TASK_INTERRUPTIBLE checks for signals and
    1160             :                  * timeout. Ignored otherwise.
    1161             :                  */
    1162           0 :                 if (likely(state == TASK_INTERRUPTIBLE)) {
    1163             :                         /* Signal pending? */
    1164           0 :                         if (signal_pending(current))
    1165           0 :                                 ret = -EINTR;
    1166           0 :                         if (timeout && !timeout->task)
    1167             :                                 ret = -ETIMEDOUT;
    1168           0 :                         if (ret)
    1169             :                                 break;
    1170             :                 }
    1171             : 
    1172           0 :                 raw_spin_unlock_irq(&lock->wait_lock);
    1173             : 
    1174           0 :                 debug_rt_mutex_print_deadlock(waiter);
    1175             : 
    1176           0 :                 schedule();
    1177             : 
    1178           0 :                 raw_spin_lock_irq(&lock->wait_lock);
    1179           0 :                 set_current_state(state);
    1180             :         }
    1181             : 
    1182           0 :         __set_current_state(TASK_RUNNING);
    1183           0 :         return ret;
    1184             : }
    1185             : 
    1186           0 : static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
    1187             :                                      struct rt_mutex_waiter *w)
    1188             : {
    1189             :         /*
    1190             :          * If the result is not -EDEADLOCK or the caller requested
    1191             :          * deadlock detection, nothing to do here.
    1192             :          */
    1193           0 :         if (res != -EDEADLOCK || detect_deadlock)
    1194           0 :                 return;
    1195             : 
    1196             :         /*
    1197             :          * Yell loudly and stop the task right here.
    1198             :          */
    1199           0 :         rt_mutex_print_deadlock(w);
    1200           0 :         while (1) {
    1201           0 :                 set_current_state(TASK_INTERRUPTIBLE);
    1202           0 :                 schedule();
    1203             :         }
    1204             : }
    1205             : 
    1206             : /*
    1207             :  * Slow path lock function:
    1208             :  */
    1209             : static int __sched
    1210           0 : rt_mutex_slowlock(struct rt_mutex *lock, int state,
    1211             :                   struct hrtimer_sleeper *timeout,
    1212             :                   enum rtmutex_chainwalk chwalk)
    1213             : {
    1214           0 :         struct rt_mutex_waiter waiter;
    1215           0 :         unsigned long flags;
    1216           0 :         int ret = 0;
    1217             : 
    1218           0 :         rt_mutex_init_waiter(&waiter);
    1219             : 
    1220             :         /*
    1221             :          * Technically we could use raw_spin_[un]lock_irq() here, but this can
    1222             :          * be called in early boot if the cmpxchg() fast path is disabled
    1223             :          * (debug, no architecture support). In this case we will acquire the
    1224             :          * rtmutex with lock->wait_lock held. But we cannot unconditionally
    1225             :          * enable interrupts in that early boot case. So we need to use the
    1226             :          * irqsave/restore variants.
    1227             :          */
    1228           0 :         raw_spin_lock_irqsave(&lock->wait_lock, flags);
    1229             : 
    1230             :         /* Try to acquire the lock again: */
    1231           0 :         if (try_to_take_rt_mutex(lock, current, NULL)) {
    1232           0 :                 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    1233           0 :                 return 0;
    1234             :         }
    1235             : 
    1236           0 :         set_current_state(state);
    1237             : 
    1238             :         /* Setup the timer, when timeout != NULL */
    1239           0 :         if (unlikely(timeout))
    1240           0 :                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
    1241             : 
    1242           0 :         ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
    1243             : 
    1244           0 :         if (likely(!ret))
    1245             :                 /* sleep on the mutex */
    1246           0 :                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
    1247             : 
    1248           0 :         if (unlikely(ret)) {
    1249           0 :                 __set_current_state(TASK_RUNNING);
    1250           0 :                 remove_waiter(lock, &waiter);
    1251           0 :                 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
    1252             :         }
    1253             : 
    1254             :         /*
    1255             :          * try_to_take_rt_mutex() sets the waiter bit
    1256             :          * unconditionally. We might have to fix that up.
    1257             :          */
    1258           0 :         fixup_rt_mutex_waiters(lock);
    1259             : 
    1260           0 :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    1261             : 
    1262             :         /* Remove pending timer: */
    1263           0 :         if (unlikely(timeout))
    1264           0 :                 hrtimer_cancel(&timeout->timer);
    1265             : 
    1266           0 :         debug_rt_mutex_free_waiter(&waiter);
    1267             : 
    1268           0 :         return ret;
    1269             : }
    1270             : 
    1271           0 : static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
    1272             : {
    1273           0 :         int ret = try_to_take_rt_mutex(lock, current, NULL);
    1274             : 
    1275             :         /*
    1276             :          * try_to_take_rt_mutex() sets the lock waiters bit
    1277             :          * unconditionally. Clean this up.
    1278             :          */
    1279           0 :         fixup_rt_mutex_waiters(lock);
    1280             : 
    1281           0 :         return ret;
    1282             : }
    1283             : 
    1284             : /*
    1285             :  * Slow path try-lock function:
    1286             :  */
    1287           0 : static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
    1288             : {
    1289           0 :         unsigned long flags;
    1290           0 :         int ret;
    1291             : 
    1292             :         /*
    1293             :          * If the lock already has an owner we fail to get the lock.
    1294             :          * This can be done without taking the @lock->wait_lock as
    1295             :          * it is only being read, and this is a trylock anyway.
    1296             :          */
    1297           0 :         if (rt_mutex_owner(lock))
    1298             :                 return 0;
    1299             : 
    1300             :         /*
    1301             :          * The mutex has currently no owner. Lock the wait lock and try to
    1302             :          * acquire the lock. We use irqsave here to support early boot calls.
    1303             :          */
    1304           0 :         raw_spin_lock_irqsave(&lock->wait_lock, flags);
    1305             : 
    1306           0 :         ret = __rt_mutex_slowtrylock(lock);
    1307             : 
    1308           0 :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    1309             : 
    1310           0 :         return ret;
    1311             : }
    1312             : 
    1313             : /*
    1314             :  * Slow path to release a rt-mutex.
    1315             :  *
    1316             :  * Return whether the current task needs to call rt_mutex_postunlock().
    1317             :  */
    1318           0 : static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
    1319             :                                         struct wake_q_head *wake_q)
    1320             : {
    1321           0 :         unsigned long flags;
    1322             : 
    1323             :         /* irqsave required to support early boot calls */
    1324           0 :         raw_spin_lock_irqsave(&lock->wait_lock, flags);
    1325             : 
    1326           0 :         debug_rt_mutex_unlock(lock);
    1327             : 
    1328             :         /*
    1329             :          * We must be careful here if the fast path is enabled. If we
    1330             :          * have no waiters queued we cannot set owner to NULL here
    1331             :          * because of:
    1332             :          *
    1333             :          * foo->lock->owner = NULL;
    1334             :          *                      rtmutex_lock(foo->lock);   <- fast path
    1335             :          *                      free = atomic_dec_and_test(foo->refcnt);
    1336             :          *                      rtmutex_unlock(foo->lock); <- fast path
    1337             :          *                      if (free)
    1338             :          *                              kfree(foo);
    1339             :          * raw_spin_unlock(foo->lock->wait_lock);
    1340             :          *
    1341             :          * So for the fastpath enabled kernel:
    1342             :          *
    1343             :          * Nothing can set the waiters bit as long as we hold
    1344             :          * lock->wait_lock. So we do the following sequence:
    1345             :          *
    1346             :          *      owner = rt_mutex_owner(lock);
    1347             :          *      clear_rt_mutex_waiters(lock);
    1348             :          *      raw_spin_unlock(&lock->wait_lock);
    1349             :          *      if (cmpxchg(&lock->owner, owner, 0) == owner)
    1350             :          *              return;
    1351             :          *      goto retry;
    1352             :          *
    1353             :          * The fastpath disabled variant is simple as all access to
    1354             :          * lock->owner is serialized by lock->wait_lock:
    1355             :          *
    1356             :          *      lock->owner = NULL;
    1357             :          *      raw_spin_unlock(&lock->wait_lock);
    1358             :          */
    1359           0 :         while (!rt_mutex_has_waiters(lock)) {
    1360             :                 /* Drops lock->wait_lock ! */
    1361           0 :                 if (unlock_rt_mutex_safe(lock, flags) == true)
    1362           0 :                         return false;
    1363             :                 /* Relock the rtmutex and try again */
    1364           0 :                 raw_spin_lock_irqsave(&lock->wait_lock, flags);
    1365             :         }
    1366             : 
    1367             :         /*
    1368             :          * The wakeup next waiter path does not suffer from the above
    1369             :          * race. See the comments there.
    1370             :          *
    1371             :          * Queue the next waiter for wakeup once we release the wait_lock.
    1372             :          */
    1373           0 :         mark_wakeup_next_waiter(wake_q, lock);
    1374           0 :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    1375             : 
    1376           0 :         return true; /* call rt_mutex_postunlock() */
    1377             : }
    1378             : 
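The retry loop above is the comment's pseudo-code in action: clear the waiters bit, drop wait_lock, and only then try to cmpxchg the owner to NULL, retrying if a waiter slipped in meanwhile. A standalone userspace model of just this release race, assuming the fast path is enabled, with C11 atomics and a pthread mutex standing in for wait_lock (toy names, not kernel code):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdint.h>

        #define HAS_WAITERS     1UL                     /* bit0 of the owner word */

        struct toy_rtmutex {
                _Atomic uintptr_t owner;                /* task pointer | HAS_WAITERS */
                pthread_mutex_t wait_lock;              /* stand-in for lock->wait_lock */
                int nr_waiters;                         /* protected by wait_lock */
        };

        /* Mirrors the shape of rt_mutex_slowunlock(): returns true if a waiter must
         * be woken, false if the lock was released uncontended.  A concurrent waiter
         * would atomic_fetch_or(&lock->owner, HAS_WAITERS) and bump nr_waiters
         * under wait_lock, which is what makes the cmpxchg below fail. */
        static bool toy_slowunlock(struct toy_rtmutex *lock, uintptr_t me)
        {
                pthread_mutex_lock(&lock->wait_lock);

                while (lock->nr_waiters == 0) {
                        uintptr_t expected = me;        /* owner word with bit0 clear */

                        /* "clear_rt_mutex_waiters()", then drop wait_lock ... */
                        atomic_store(&lock->owner, me);
                        pthread_mutex_unlock(&lock->wait_lock);

                        /* ... then release via cmpxchg, as the fast path sees it. */
                        if (atomic_compare_exchange_strong(&lock->owner, &expected,
                                                           (uintptr_t)0))
                                return false;           /* released, nobody to wake */

                        /* A waiter set HAS_WAITERS meanwhile: relock and retry. */
                        pthread_mutex_lock(&lock->wait_lock);
                }

                /* Waiters queued: the real code calls mark_wakeup_next_waiter() here. */
                pthread_mutex_unlock(&lock->wait_lock);
                return true;
        }

        int main(void)
        {
                struct toy_rtmutex lock = {
                        .wait_lock = PTHREAD_MUTEX_INITIALIZER,
                        .nr_waiters = 0,
                };
                uintptr_t me = (uintptr_t)&lock;        /* any non-zero "task pointer" */

                atomic_store(&lock.owner, me);          /* pretend we hold the lock */
                return toy_slowunlock(&lock, me);       /* 0: released uncontended */
        }
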
    1379             : /*
    1380             :  * debug aware fast / slowpath lock, trylock and unlock
    1381             :  *
    1382             :  * The atomic acquire/release ops are compiled away, when either the
    1383             :  * architecture does not support cmpxchg or when debugging is enabled.
    1384             :  */
    1385             : static inline int
    1386           0 : rt_mutex_fastlock(struct rt_mutex *lock, int state,
    1387             :                   int (*slowfn)(struct rt_mutex *lock, int state,
    1388             :                                 struct hrtimer_sleeper *timeout,
    1389             :                                 enum rtmutex_chainwalk chwalk))
    1390             : {
    1391           0 :         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
    1392             :                 return 0;
    1393             : 
    1394           0 :         return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
    1395             : }
    1396             : 
    1397             : static inline int
    1398           0 : rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
    1399             :                         struct hrtimer_sleeper *timeout,
    1400             :                         enum rtmutex_chainwalk chwalk,
    1401             :                         int (*slowfn)(struct rt_mutex *lock, int state,
    1402             :                                       struct hrtimer_sleeper *timeout,
    1403             :                                       enum rtmutex_chainwalk chwalk))
    1404             : {
    1405           0 :         if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
    1406             :             likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
    1407             :                 return 0;
    1408             : 
    1409           0 :         return slowfn(lock, state, timeout, chwalk);
    1410             : }
    1411             : 
    1412             : static inline int
    1413           0 : rt_mutex_fasttrylock(struct rt_mutex *lock,
    1414             :                      int (*slowfn)(struct rt_mutex *lock))
    1415             : {
    1416           0 :         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
    1417             :                 return 1;
    1418             : 
    1419           0 :         return slowfn(lock);
    1420             : }
    1421             : 
    1422             : /*
    1423             :  * Performs the wakeup of the top-waiter and re-enables preemption.
    1424             :  */
    1425           0 : void rt_mutex_postunlock(struct wake_q_head *wake_q)
    1426             : {
    1427           0 :         wake_up_q(wake_q);
    1428             : 
    1429             :         /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
    1430           0 :         preempt_enable();
    1431           0 : }
    1432             : 
    1433             : static inline void
    1434           0 : rt_mutex_fastunlock(struct rt_mutex *lock,
    1435             :                     bool (*slowfn)(struct rt_mutex *lock,
    1436             :                                    struct wake_q_head *wqh))
    1437             : {
    1438           0 :         DEFINE_WAKE_Q(wake_q);
    1439             : 
    1440           0 :         if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
    1441             :                 return;
    1442             : 
    1443           0 :         if (slowfn(lock, &wake_q))
    1444           0 :                 rt_mutex_postunlock(&wake_q);
    1445             : }
    1446             : 
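The three fastpath wrappers above differ only in the cmpxchg they attempt: an acquire-ordered cmpxchg from NULL to current when locking, and a release-ordered cmpxchg from current to NULL when unlocking, with the slow path as fallback. A standalone C11 sketch of that acquire/release pairing (a toy lock word, not the kernel's rt_mutex_cmpxchg_acquire()/_release() helpers):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdint.h>

        static _Atomic uintptr_t owner;         /* 0 == unlocked */

        static bool toy_fastlock(uintptr_t me)
        {
                uintptr_t expected = 0;

                /* Acquire: after a successful CAS we observe the previous
                 * owner's critical-section stores. */
                return atomic_compare_exchange_strong_explicit(&owner, &expected, me,
                                                               memory_order_acquire,
                                                               memory_order_relaxed);
        }

        static bool toy_fastunlock(uintptr_t me)
        {
                uintptr_t expected = me;

                /* Release: our critical-section stores are visible to the
                 * next acquirer before the lock reads as free. */
                return atomic_compare_exchange_strong_explicit(&owner, &expected,
                                                               (uintptr_t)0,
                                                               memory_order_release,
                                                               memory_order_relaxed);
        }

        int main(void)
        {
                uintptr_t me = 1;               /* any non-zero "task pointer" */

                if (toy_fastlock(me))           /* fast path taken */
                        toy_fastunlock(me);     /* on failure, fall back to slowpath */
                return 0;
        }

A failed CAS in either direction is exactly the condition under which the wrappers above fall back to rt_mutex_slowlock(), rt_mutex_slowtrylock() or rt_mutex_slowunlock().
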
    1447           0 : static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
    1448             : {
    1449           0 :         might_sleep();
    1450             : 
    1451           0 :         mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
    1452           0 :         rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
    1453           0 : }
    1454             : 
    1455             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
    1456             : /**
    1457             :  * rt_mutex_lock_nested - lock a rt_mutex
    1458             :  *
    1459             :  * @lock: the rt_mutex to be locked
    1460             :  * @subclass: the lockdep subclass
    1461             :  */
    1462           0 : void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
    1463             : {
    1464           0 :         __rt_mutex_lock(lock, subclass);
    1465           0 : }
    1466             : EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
    1467             : 
    1468             : #else /* !CONFIG_DEBUG_LOCK_ALLOC */
    1469             : 
    1470             : /**
    1471             :  * rt_mutex_lock - lock a rt_mutex
    1472             :  *
    1473             :  * @lock: the rt_mutex to be locked
    1474             :  */
    1475             : void __sched rt_mutex_lock(struct rt_mutex *lock)
    1476             : {
    1477             :         __rt_mutex_lock(lock, 0);
    1478             : }
    1479             : EXPORT_SYMBOL_GPL(rt_mutex_lock);
    1480             : #endif
    1481             : 
    1482             : /**
    1483             :  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
    1484             :  *
    1485             :  * @lock:               the rt_mutex to be locked
    1486             :  *
    1487             :  * Returns:
    1488             :  *  0           on success
    1489             :  * -EINTR       when interrupted by a signal
    1490             :  */
    1491           0 : int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
    1492             : {
    1493           0 :         int ret;
    1494             : 
    1495           0 :         might_sleep();
    1496             : 
    1497           0 :         mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    1498           0 :         ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
    1499           0 :         if (ret)
    1500           0 :                 mutex_release(&lock->dep_map, _RET_IP_);
    1501             : 
    1502           0 :         return ret;
    1503             : }
    1504             : EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
    1505             : 
    1506             : /*
    1507             :  * Futex variant, must not use fastpath.
    1508             :  */
    1509           0 : int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
    1510             : {
    1511           0 :         return rt_mutex_slowtrylock(lock);
    1512             : }
    1513             : 
    1514           0 : int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
    1515             : {
    1516           0 :         return __rt_mutex_slowtrylock(lock);
    1517             : }
    1518             : 
    1519             : /**
    1520             :  * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout;
    1521             :  *                      the timeout structure is provided
    1522             :  *                      by the caller
    1523             :  *
    1524             :  * @lock:               the rt_mutex to be locked
    1525             :  * @timeout:            timeout structure or NULL (no timeout)
    1526             :  *
    1527             :  * Returns:
    1528             :  *  0           on success
    1529             :  * -EINTR       when interrupted by a signal
    1530             :  * -ETIMEDOUT   when the timeout expired
    1531             :  */
    1532             : int
    1533           0 : rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
    1534             : {
    1535           0 :         int ret;
    1536             : 
    1537           0 :         might_sleep();
    1538             : 
    1539           0 :         mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    1540           0 :         ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
    1541             :                                        RT_MUTEX_MIN_CHAINWALK,
    1542             :                                        rt_mutex_slowlock);
    1543           0 :         if (ret)
    1544           0 :                 mutex_release(&lock->dep_map, _RET_IP_);
    1545             : 
    1546           0 :         return ret;
    1547             : }
    1548             : EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
    1549             : 
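rt_mutex_slowlock() starts the timer itself via hrtimer_start_expires(), so a caller of rt_mutex_timed_lock() only pre-initializes the sleeper and sets the absolute expiry. A kernel-context sketch of that calling pattern, modeled on how the futex code prepares an hrtimer_sleeper; the helper choice (hrtimer_init_sleeper_on_stack(), hrtimer_set_expires(), destroy_hrtimer_on_stack()) and the function name are assumptions for illustration, not code from this file:

        static int example_lock_with_timeout(struct rt_mutex *lock, ktime_t abs_expiry)
        {
                struct hrtimer_sleeper to;
                int ret;

                hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
                hrtimer_set_expires(&to.timer, abs_expiry);

                ret = rt_mutex_timed_lock(lock, &to);   /* 0, -EINTR or -ETIMEDOUT */

                destroy_hrtimer_on_stack(&to.timer);
                return ret;
        }
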
    1550             : /**
    1551             :  * rt_mutex_trylock - try to lock a rt_mutex
    1552             :  *
    1553             :  * @lock:       the rt_mutex to be locked
    1554             :  *
    1555             :  * This function can only be called in thread context. It's safe to
    1556             :  * call it from atomic regions, but not from hard interrupt or soft
    1557             :  * interrupt context.
    1558             :  *
    1559             :  * Returns 1 on success and 0 on contention
    1560             :  */
    1561           0 : int __sched rt_mutex_trylock(struct rt_mutex *lock)
    1562             : {
    1563           0 :         int ret;
    1564             : 
    1565           0 :         if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
    1566             :                 return 0;
    1567             : 
    1568           0 :         ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
    1569           0 :         if (ret)
    1570           0 :                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
    1571             : 
    1572             :         return ret;
    1573             : }
    1574             : EXPORT_SYMBOL_GPL(rt_mutex_trylock);
    1575             : 
    1576             : /**
    1577             :  * rt_mutex_unlock - unlock a rt_mutex
    1578             :  *
    1579             :  * @lock: the rt_mutex to be unlocked
    1580             :  */
    1581           0 : void __sched rt_mutex_unlock(struct rt_mutex *lock)
    1582             : {
    1583           0 :         mutex_release(&lock->dep_map, _RET_IP_);
    1584           0 :         rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
    1585           0 : }
    1586             : EXPORT_SYMBOL_GPL(rt_mutex_unlock);
    1587             : 
    1588             : /**
    1589             :  * __rt_mutex_futex_unlock - Futex variant; since futex variants do not
    1590             :  * use the fast-path, this can be simple and does not need to retry.
    1591             :  *
    1592             :  * @lock:       The rt_mutex to be unlocked
    1593             :  * @wake_q:     The wake queue head from which to get the next lock waiter
    1594             :  */
    1595           0 : bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
    1596             :                                     struct wake_q_head *wake_q)
    1597             : {
    1598           0 :         lockdep_assert_held(&lock->wait_lock);
    1599             : 
    1600           0 :         debug_rt_mutex_unlock(lock);
    1601             : 
    1602           0 :         if (!rt_mutex_has_waiters(lock)) {
    1603           0 :                 lock->owner = NULL;
    1604           0 :                 return false; /* done */
    1605             :         }
    1606             : 
    1607             :         /*
    1608             :          * We've already deboosted, mark_wakeup_next_waiter() will
    1609             :          * retain preempt_disabled when we drop the wait_lock, to
    1610             :          * avoid inversion prior to the wakeup.  preempt_disable()
    1611             :          * therein pairs with rt_mutex_postunlock().
    1612             :          */
    1613           0 :         mark_wakeup_next_waiter(wake_q, lock);
    1614             : 
    1615           0 :         return true; /* call postunlock() */
    1616             : }
    1617             : 
    1618           0 : void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
    1619             : {
    1620           0 :         DEFINE_WAKE_Q(wake_q);
    1621           0 :         unsigned long flags;
    1622           0 :         bool postunlock;
    1623             : 
    1624           0 :         raw_spin_lock_irqsave(&lock->wait_lock, flags);
    1625           0 :         postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
    1626           0 :         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    1627             : 
    1628           0 :         if (postunlock)
    1629           0 :                 rt_mutex_postunlock(&wake_q);
    1630           0 : }
    1631             : 
    1632             : /**
    1633             :  * rt_mutex_destroy - mark a mutex unusable
    1634             :  * @lock: the mutex to be destroyed
    1635             :  *
    1636             :  * This function marks the mutex uninitialized, and any subsequent
    1637             :  * use of the mutex is forbidden. The mutex must not be locked when
    1638             :  * this function is called.
    1639             :  */
    1640           0 : void rt_mutex_destroy(struct rt_mutex *lock)
    1641             : {
    1642           0 :         WARN_ON(rt_mutex_is_locked(lock));
    1643             : #ifdef CONFIG_DEBUG_RT_MUTEXES
    1644           0 :         lock->magic = NULL;
    1645             : #endif
    1646           0 : }
    1647             : EXPORT_SYMBOL_GPL(rt_mutex_destroy);
    1648             : 
    1649             : /**
    1650             :  * __rt_mutex_init - initialize the rt_mutex
    1651             :  *
    1652             :  * @lock:       The rt_mutex to be initialized
    1653             :  * @name:       The lock name used for debugging
    1654             :  * @key:        The lock class key used for debugging
    1655             :  *
    1656             :  * Initialize the rt_mutex to unlocked state.
    1657             :  *
    1658             :  * Initializing of a locked rt_mutex is not allowed
    1659             :  */
    1660           0 : void __rt_mutex_init(struct rt_mutex *lock, const char *name,
    1661             :                      struct lock_class_key *key)
    1662             : {
    1663           0 :         lock->owner = NULL;
    1664           0 :         raw_spin_lock_init(&lock->wait_lock);
    1665           0 :         lock->waiters = RB_ROOT_CACHED;
    1666             : 
    1667           0 :         if (name && key)
    1668           0 :                 debug_rt_mutex_init(lock, name, key);
    1669           0 : }
    1670             : EXPORT_SYMBOL_GPL(__rt_mutex_init);
    1671             : 
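A kernel-context usage sketch of the API initialized here. It assumes the DEFINE_RT_MUTEX()/rt_mutex_init() wrappers from <linux/rtmutex.h>, which supply the lock name and lockdep class key that __rt_mutex_init() expects; example_lock and example_critical_section are illustrative names:

        #include <linux/rtmutex.h>

        static DEFINE_RT_MUTEX(example_lock);   /* static initializer from rtmutex.h */

        static void example_critical_section(void)
        {
                rt_mutex_lock(&example_lock);   /* may sleep; waiters boost the owner */
                /* ... exclusive work ... */
                rt_mutex_unlock(&example_lock);

                /* Thread context only, see the rt_mutex_trylock() comment above. */
                if (rt_mutex_trylock(&example_lock))
                        rt_mutex_unlock(&example_lock);
        }
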
    1672             : /**
    1673             :  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
    1674             :  *                              proxy owner
    1675             :  *
    1676             :  * @lock:       the rt_mutex to be locked
    1677             :  * @proxy_owner: the task to set as owner
    1678             :  *
    1679             :  * No locking. Caller has to do serializing itself
    1680             :  *
    1681             :  * Special API call for PI-futex support. This initializes the rtmutex and
    1682             :  * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
    1683             :  * possible at this point because the pi_state which contains the rtmutex
    1684             :  * is not yet visible to other tasks.
    1685             :  */
    1686           0 : void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
    1687             :                                 struct task_struct *proxy_owner)
    1688             : {
    1689           0 :         __rt_mutex_init(lock, NULL, NULL);
    1690           0 :         debug_rt_mutex_proxy_lock(lock, proxy_owner);
    1691           0 :         rt_mutex_set_owner(lock, proxy_owner);
    1692           0 : }
    1693             : 
    1694             : /**
    1695             :  * rt_mutex_proxy_unlock - release a lock on behalf of owner
    1696             :  *
    1697             :  * @lock:       the rt_mutex to be locked
    1698             :  *
    1699             :  * No locking. Caller has to do serializing itself
    1700             :  *
    1701             :  * Special API call for PI-futex support. This merrily cleans up the rtmutex
    1702             :  * (debugging) state. Concurrent operations on this rt_mutex are not
    1703             :  * possible because it belongs to the pi_state which is about to be freed
    1704             :  * and it is no longer visible to other tasks.
    1705             :  */
    1706           0 : void rt_mutex_proxy_unlock(struct rt_mutex *lock)
    1707             : {
    1708           0 :         debug_rt_mutex_proxy_unlock(lock);
    1709           0 :         rt_mutex_set_owner(lock, NULL);
    1710           0 : }
    1711             : 
    1712             : /**
    1713             :  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
    1714             :  * @lock:               the rt_mutex to take
    1715             :  * @waiter:             the pre-initialized rt_mutex_waiter
    1716             :  * @task:               the task to prepare
    1717             :  *
    1718             :  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
    1719             :  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
    1720             :  *
    1721             :  * NOTE: does _NOT_ remove the @waiter on failure; the caller must either call
    1722             :  * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
    1723             :  *
    1724             :  * Returns:
    1725             :  *  0 - task blocked on lock
    1726             :  *  1 - acquired the lock for task, caller should wake it up
    1727             :  * <0 - error
    1728             :  *
    1729             :  * Special API call for PI-futex support.
    1730             :  */
    1731           0 : int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
    1732             :                               struct rt_mutex_waiter *waiter,
    1733             :                               struct task_struct *task)
    1734             : {
    1735           0 :         int ret;
    1736             : 
    1737           0 :         lockdep_assert_held(&lock->wait_lock);
    1738             : 
    1739           0 :         if (try_to_take_rt_mutex(lock, task, NULL))
    1740             :                 return 1;
    1741             : 
    1742             :         /* We enforce deadlock detection for futexes */
    1743           0 :         ret = task_blocks_on_rt_mutex(lock, waiter, task,
    1744             :                                       RT_MUTEX_FULL_CHAINWALK);
    1745             : 
    1746           0 :         if (ret && !rt_mutex_owner(lock)) {
    1747             :                 /*
    1748             :                  * Reset the return value. We might have
    1749             :                  * returned with -EDEADLK and the owner
    1750             :                  * released the lock while we were walking the
    1751             :                  * pi chain.  Let the waiter sort it out.
    1752             :                  */
    1753           0 :                 ret = 0;
    1754             :         }
    1755             : 
    1756           0 :         debug_rt_mutex_print_deadlock(waiter);
    1757             : 
    1758           0 :         return ret;
    1759             : }
    1760             : 
    1761             : /**
    1762             :  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
    1763             :  * @lock:               the rt_mutex to take
    1764             :  * @waiter:             the pre-initialized rt_mutex_waiter
    1765             :  * @task:               the task to prepare
    1766             :  *
    1767             :  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
    1768             :  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
    1769             :  *
    1770             :  * NOTE: unlike __rt_mutex_start_proxy_lock(), this _DOES_ remove the @waiter
    1771             :  * on failure.
    1772             :  *
    1773             :  * Returns:
    1774             :  *  0 - task blocked on lock
    1775             :  *  1 - acquired the lock for task, caller should wake it up
    1776             :  * <0 - error
    1777             :  *
    1778             :  * Special API call for PI-futex support.
    1779             :  */
    1780           0 : int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
    1781             :                               struct rt_mutex_waiter *waiter,
    1782             :                               struct task_struct *task)
    1783             : {
    1784           0 :         int ret;
    1785             : 
    1786           0 :         raw_spin_lock_irq(&lock->wait_lock);
    1787           0 :         ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
    1788           0 :         if (unlikely(ret))
    1789           0 :                 remove_waiter(lock, waiter);
    1790           0 :         raw_spin_unlock_irq(&lock->wait_lock);
    1791             : 
    1792           0 :         return ret;
    1793             : }
    1794             : 
    1795             : /**
    1796             :  * rt_mutex_next_owner - return the next owner of the lock
    1797             :  *
    1798             :  * @lock: the rt lock query
    1799             :  *
    1800             :  * Returns the next owner of the lock or NULL
    1801             :  *
    1802             :  * Caller has to serialize against other accessors to the lock
    1803             :  * itself.
    1804             :  *
    1805             :  * Special API call for PI-futex support
    1806             :  */
    1807           0 : struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
    1808             : {
    1809           0 :         if (!rt_mutex_has_waiters(lock))
    1810             :                 return NULL;
    1811             : 
    1812           0 :         return rt_mutex_top_waiter(lock)->task;
    1813             : }
    1814             : 
    1815             : /**
    1816             :  * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
    1817             :  * @lock:               the rt_mutex we were woken on
    1818             :  * @to:                 the timeout, NULL if none; the hrtimer should already
    1819             :  *                      have been started.
    1820             :  * @waiter:             the pre-initialized rt_mutex_waiter
    1821             :  *
    1822             :  * Wait for the lock acquisition started on our behalf by
    1823             :  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
    1824             :  * rt_mutex_cleanup_proxy_lock().
    1825             :  *
    1826             :  * Returns:
    1827             :  *  0 - success
    1828             :  * <0 - error, one of -EINTR, -ETIMEDOUT
    1829             :  *
    1830             :  * Special API call for PI-futex support
    1831             :  */
    1832           0 : int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
    1833             :                                struct hrtimer_sleeper *to,
    1834             :                                struct rt_mutex_waiter *waiter)
    1835             : {
    1836           0 :         int ret;
    1837             : 
    1838           0 :         raw_spin_lock_irq(&lock->wait_lock);
    1839             :         /* sleep on the mutex */
    1840           0 :         set_current_state(TASK_INTERRUPTIBLE);
    1841           0 :         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
    1842             :         /*
    1843             :          * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
    1844             :          * have to fix that up.
    1845             :          */
    1846           0 :         fixup_rt_mutex_waiters(lock);
    1847           0 :         raw_spin_unlock_irq(&lock->wait_lock);
    1848             : 
    1849           0 :         return ret;
    1850             : }
    1851             : 
    1852             : /**
    1853             :  * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
    1854             :  * @lock:               the rt_mutex we were woken on
    1855             :  * @waiter:             the pre-initialized rt_mutex_waiter
    1856             :  *
    1857             :  * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
    1858             :  * rt_mutex_wait_proxy_lock().
    1859             :  *
    1860             :  * Unless we acquired the lock, we're still enqueued on the wait-list and can
    1861             :  * in fact still be granted ownership until we're removed. Therefore we can
    1862             :  * find we are in fact the owner and must disregard the
    1863             :  * rt_mutex_wait_proxy_lock() failure.
    1864             :  *
    1865             :  * Returns:
    1866             :  *  true  - did the cleanup; we are done.
    1867             :  *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
    1868             :  *          the caller should disregard its return value.
    1869             :  *
    1870             :  * Special API call for PI-futex support
    1871             :  */
    1872           0 : bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
    1873             :                                  struct rt_mutex_waiter *waiter)
    1874             : {
    1875           0 :         bool cleanup = false;
    1876             : 
    1877           0 :         raw_spin_lock_irq(&lock->wait_lock);
    1878             :         /*
    1879             :          * Do an unconditional try-lock, this deals with the lock stealing
    1880             :          * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
    1881             :          * sets a NULL owner.
    1882             :          *
    1883             :          * We're not interested in the return value, because the subsequent
    1884             :          * test on rt_mutex_owner() will infer that. If the trylock succeeded,
    1885             :          * we will own the lock and it will have removed the waiter. If we
    1886             :          * failed the trylock, we're still not owner and we need to remove
    1887             :          * ourselves.
    1888             :          */
    1889           0 :         try_to_take_rt_mutex(lock, current, waiter);
    1890             :         /*
    1891             :          * Unless we're the owner, we're still enqueued on the wait_list.
    1892             :          * So check if we became owner, if not, take us off the wait_list.
    1893             :          */
    1894           0 :         if (rt_mutex_owner(lock) != current) {
    1895           0 :                 remove_waiter(lock, waiter);
    1896           0 :                 cleanup = true;
    1897             :         }
    1898             :         /*
    1899             :          * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
    1900             :          * have to fix that up.
    1901             :          */
    1902           0 :         fixup_rt_mutex_waiters(lock);
    1903             : 
    1904           0 :         raw_spin_unlock_irq(&lock->wait_lock);
    1905             : 
    1906           0 :         return cleanup;
    1907             : }
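
Taken together, the proxy-lock docstrings above describe a three-step contract for the PI-futex code: start the acquisition on behalf of a waiter, let that waiter sleep until the lock is handed over, and clean up only if the wait failed and the lock was not granted anyway. The sketch below compresses the two call sites (the requeueing task and the woken waiter) into a single function purely to show ordering and error handling; it is an illustration of the contract, not the futex implementation:

        static int example_proxy_acquire(struct rt_mutex *lock,
                                         struct rt_mutex_waiter *waiter,
                                         struct task_struct *task,
                                         struct hrtimer_sleeper *to)
        {
                int ret;

                rt_mutex_init_waiter(waiter);

                /* Enqueue @task as a waiter; returns 1 if the lock was acquired. */
                ret = rt_mutex_start_proxy_lock(lock, waiter, task);
                if (ret)
                        return ret;     /* 1: task owns it now, <0: error (waiter removed) */

                /* Later, in @task's own context: wait for the hand-off ... */
                ret = rt_mutex_wait_proxy_lock(lock, to, waiter);

                /* ... and on failure clean up, unless we became owner anyway. */
                if (ret && rt_mutex_cleanup_proxy_lock(lock, waiter))
                        return ret;     /* -EINTR or -ETIMEDOUT stands */

                return 0;               /* lock acquired */
        }
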

Generated by: LCOV version 1.14