LCOV - code coverage report
Current view: top level - kernel/locking - mutex.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:           190    383      49.6 %
Functions:       17     31       54.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * kernel/locking/mutex.c
       4             :  *
       5             :  * Mutexes: blocking mutual exclusion locks
       6             :  *
       7             :  * Started by Ingo Molnar:
       8             :  *
       9             :  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
      10             :  *
      11             :  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
      12             :  * David Howells for suggestions and improvements.
      13             :  *
      14             :  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
      15             :  *    from the -rt tree, where it was originally implemented for rtmutexes
      16             :  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
      17             :  *    and Sven Dietrich.
      18             :  *
      19             :  * Also see Documentation/locking/mutex-design.rst.
      20             :  */
      21             : #include <linux/mutex.h>
      22             : #include <linux/ww_mutex.h>
      23             : #include <linux/sched/signal.h>
      24             : #include <linux/sched/rt.h>
      25             : #include <linux/sched/wake_q.h>
      26             : #include <linux/sched/debug.h>
      27             : #include <linux/export.h>
      28             : #include <linux/spinlock.h>
      29             : #include <linux/interrupt.h>
      30             : #include <linux/debug_locks.h>
      31             : #include <linux/osq_lock.h>
      32             : 
      33             : #ifdef CONFIG_DEBUG_MUTEXES
      34             : # include "mutex-debug.h"
      35             : #else
      36             : # include "mutex.h"
      37             : #endif
      38             : 
      39             : void
      40       75281 : __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
      41             : {
      42       75281 :         atomic_long_set(&lock->owner, 0);
      43       75281 :         spin_lock_init(&lock->wait_lock);
      44       75281 :         INIT_LIST_HEAD(&lock->wait_list);
      45             : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
      46       75281 :         osq_lock_init(&lock->osq);
      47             : #endif
      48             : 
      49       75281 :         debug_mutex_init(lock, name, key);
      50       75282 : }
      51             : EXPORT_SYMBOL(__mutex_init);
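
Annotation (not part of mutex.c): __mutex_init() is rarely called directly. The sketch below is illustrative only and shows the two usual entry points, the static DEFINE_MUTEX() initializer and the mutex_init() macro, which supplies the name and lock_class_key arguments for lockdep; "struct my_dev" is a hypothetical example structure.

        #include <linux/mutex.h>

        static DEFINE_MUTEX(global_lock);       /* statically defined and initialized */

        struct my_dev {
                struct mutex lock;
        };

        static void my_dev_setup(struct my_dev *dev)
        {
                mutex_init(&dev->lock);         /* wraps __mutex_init(&dev->lock, ...) */
        }
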
      52             : 
      53             : /*
      54             :  * @owner: contains: 'struct task_struct *' to the current lock owner,
      55             :  * NULL means not owned. Since task_struct pointers are aligned to
      56             :  * at least L1_CACHE_BYTES, we have low bits to store extra state.
      57             :  *
      58             :  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
      59             :  * Bit1 indicates unlock needs to hand the lock to the top-waiter.
      60             :  * Bit2 indicates handoff has been done and we're waiting for pickup.
      61             :  */
      62             : #define MUTEX_FLAG_WAITERS      0x01
      63             : #define MUTEX_FLAG_HANDOFF      0x02
      64             : #define MUTEX_FLAG_PICKUP       0x04
      65             : 
      66             : #define MUTEX_FLAGS             0x07
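
Annotation (not part of mutex.c): purely as an illustration of the bit packing described above (the pointer value is made up), an owner word with one waiter queued decomposes exactly the way __owner_task() and __owner_flags() below compute it:

        /* Hypothetical owner word: task_struct at 0xffff888012345640, WAITERS set. */
        unsigned long owner = 0xffff888012345640UL | MUTEX_FLAG_WAITERS;

        struct task_struct *task = (struct task_struct *)(owner & ~MUTEX_FLAGS); /* 0xffff888012345640 */
        unsigned long flags = owner & MUTEX_FLAGS;                               /* 0x01: WAITERS */
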
      67             : 
      68             : /*
      69             :  * Internal helper function; C doesn't allow us to hide it :/
      70             :  *
      71             :  * DO NOT USE (outside of mutex code).
      72             :  */
      73      114908 : static inline struct task_struct *__mutex_owner(struct mutex *lock)
      74             : {
      75      229816 :         return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
      76             : }
      77             : 
      78      129222 : static inline struct task_struct *__owner_task(unsigned long owner)
      79             : {
      80      129222 :         return (struct task_struct *)(owner & ~MUTEX_FLAGS);
      81             : }
      82             : 
      83         413 : bool mutex_is_locked(struct mutex *lock)
      84             : {
      85         413 :         return __mutex_owner(lock) != NULL;
      86             : }
      87             : EXPORT_SYMBOL(mutex_is_locked);
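
Annotation (not part of mutex.c): mutex_is_locked() only reports that some task owns the lock, not that the current task does, so it is mostly useful for sanity checks. A minimal hedged sketch with hypothetical names (my_counter, my_counter_bump):

        static int my_counter;

        static void my_counter_bump(struct mutex *lock)
        {
                /* Caller is expected to hold *lock; warn if clearly not. */
                WARN_ON(!mutex_is_locked(lock));
                my_counter++;
        }
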
      88             : 
      89      257617 : static inline unsigned long __owner_flags(unsigned long owner)
      90             : {
      91      257617 :         return owner & MUTEX_FLAGS;
      92             : }
      93             : 
      94             : /*
      95             :  * Trylock variant that returns the owning task on failure.
      96             :  */
      97      129218 : static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
      98             : {
      99      129218 :         unsigned long owner, curr = (unsigned long)current;
     100             : 
     101      129218 :         owner = atomic_long_read(&lock->owner);
     102      129274 :         for (;;) { /* must loop, can race against a flag */
     103      129274 :                 unsigned long old, flags = __owner_flags(owner);
     104      129274 :                 unsigned long task = owner & ~MUTEX_FLAGS;
     105             : 
     106      129274 :                 if (task) {
     107         878 :                         if (likely(task != curr))
     108             :                                 break;
     109             : 
     110           0 :                         if (likely(!(flags & MUTEX_FLAG_PICKUP)))
     111             :                                 break;
     112             : 
     113           0 :                         flags &= ~MUTEX_FLAG_PICKUP;
     114             :                 } else {
     115             : #ifdef CONFIG_DEBUG_MUTEXES
     116      128396 :                         DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
     117             : #endif
     118             :                 }
     119             : 
     120             :                 /*
      121             :                  * We set the HANDOFF bit; we must make sure it doesn't live
     122             :                  * past the point where we acquire it. This would be possible
     123             :                  * if we (accidentally) set the bit on an unlocked mutex.
     124             :                  */
     125      128396 :                 flags &= ~MUTEX_FLAG_HANDOFF;
     126             : 
     127      128396 :                 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
     128      128424 :                 if (old == owner)
     129             :                         return NULL;
     130             : 
     131             :                 owner = old;
     132             :         }
     133             : 
     134         878 :         return __owner_task(owner);
     135             : }
     136             : 
     137             : /*
     138             :  * Actual trylock that will work on any unlocked state.
     139             :  */
     140      128418 : static inline bool __mutex_trylock(struct mutex *lock)
     141             : {
     142      128138 :         return !__mutex_trylock_or_owner(lock);
     143             : }
     144             : 
     145             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     146             : /*
      147             :  * Lockdep annotations are confined to the slow paths for simplicity.
     148             :  * There is nothing that would stop spreading the lockdep annotations outwards
     149             :  * except more code.
     150             :  */
     151             : 
     152             : /*
     153             :  * Optimistic trylock that only works in the uncontended case. Make sure to
     154             :  * follow with a __mutex_trylock() before failing.
     155             :  */
     156             : static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
     157             : {
     158             :         unsigned long curr = (unsigned long)current;
     159             :         unsigned long zero = 0UL;
     160             : 
     161             :         if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
     162             :                 return true;
     163             : 
     164             :         return false;
     165             : }
     166             : 
     167             : static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
     168             : {
     169             :         unsigned long curr = (unsigned long)current;
     170             : 
     171             :         if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
     172             :                 return true;
     173             : 
     174             :         return false;
     175             : }
     176             : #endif
     177             : 
     178          45 : static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
     179             : {
     180          45 :         atomic_long_or(flag, &lock->owner);
     181          45 : }
     182             : 
     183          23 : static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
     184             : {
     185          23 :         atomic_long_andnot(flag, &lock->owner);
     186          23 : }
     187             : 
     188          45 : static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
     189             : {
     190          45 :         return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
     191             : }
     192             : 
     193             : /*
     194             :  * Add @waiter to a given location in the lock wait_list and set the
     195             :  * FLAG_WAITERS flag if it's the first waiter.
     196             :  */
     197             : static void __sched
     198          23 : __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
     199             :                    struct list_head *list)
     200             : {
     201          23 :         debug_mutex_add_waiter(lock, waiter, current);
     202             : 
     203          23 :         list_add_tail(&waiter->list, list);
     204          23 :         if (__mutex_waiter_is_first(lock, waiter))
     205          23 :                 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
     206          23 : }
     207             : 
     208             : /*
      209             :  * Give up ownership to a specific task; when @task = NULL, this is equivalent
      210             :  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
      211             :  * WAITERS. Provides RELEASE semantics like a regular unlock;
      212             :  * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
     213             :  */
     214           0 : static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
     215             : {
     216           0 :         unsigned long owner = atomic_long_read(&lock->owner);
     217             : 
     218           0 :         for (;;) {
     219           0 :                 unsigned long old, new;
     220             : 
     221             : #ifdef CONFIG_DEBUG_MUTEXES
     222           0 :                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
     223           0 :                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
     224             : #endif
     225             : 
     226           0 :                 new = (owner & MUTEX_FLAG_WAITERS);
     227           0 :                 new |= (unsigned long)task;
     228           0 :                 if (task)
     229           0 :                         new |= MUTEX_FLAG_PICKUP;
     230             : 
     231           0 :                 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
     232           0 :                 if (old == owner)
     233             :                         break;
     234             : 
     235             :                 owner = old;
     236             :         }
     237           0 : }
     238             : 
     239             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     240             : /*
     241             :  * We split the mutex lock/unlock logic into separate fastpath and
     242             :  * slowpath functions, to reduce the register pressure on the fastpath.
     243             :  * We also put the fastpath first in the kernel image, to make sure the
     244             :  * branch is predicted by the CPU as default-untaken.
     245             :  */
     246             : static void __sched __mutex_lock_slowpath(struct mutex *lock);
     247             : 
     248             : /**
     249             :  * mutex_lock - acquire the mutex
     250             :  * @lock: the mutex to be acquired
     251             :  *
     252             :  * Lock the mutex exclusively for this task. If the mutex is not
     253             :  * available right now, it will sleep until it can get it.
     254             :  *
     255             :  * The mutex must later on be released by the same task that
     256             :  * acquired it. Recursive locking is not allowed. The task
     257             :  * may not exit without first unlocking the mutex. Also, kernel
     258             :  * memory where the mutex resides must not be freed with
     259             :  * the mutex still locked. The mutex must first be initialized
     260             :  * (or statically defined) before it can be locked. memset()-ing
     261             :  * the mutex to 0 is not allowed.
     262             :  *
     263             :  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
     264             :  * checks that will enforce the restrictions and will also do
     265             :  * deadlock debugging)
     266             :  *
     267             :  * This function is similar to (but not equivalent to) down().
     268             :  */
     269             : void __sched mutex_lock(struct mutex *lock)
     270             : {
     271             :         might_sleep();
     272             : 
     273             :         if (!__mutex_trylock_fast(lock))
     274             :                 __mutex_lock_slowpath(lock);
     275             : }
     276             : EXPORT_SYMBOL(mutex_lock);
     277             : #endif
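
Annotation (not part of mutex.c): for reference, the usage pattern the kernel-doc above describes looks like this in practice; a minimal sketch with hypothetical names (my_item, my_list, my_list_lock), assuming process context where sleeping is allowed:

        #include <linux/list.h>
        #include <linux/mutex.h>

        struct my_item {
                struct list_head node;
        };

        static LIST_HEAD(my_list);
        static DEFINE_MUTEX(my_list_lock);

        static void my_list_add(struct my_item *item)
        {
                mutex_lock(&my_list_lock);              /* may sleep if contended */
                list_add_tail(&item->node, &my_list);
                mutex_unlock(&my_list_lock);            /* same task that locked it */
        }
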
     278             : 
     279             : /*
     280             :  * Wait-Die:
      281             :  *   A newer transaction is killed when:
      282             :  *     it (the new transaction) makes a request for a lock being held
      283             :  *     by an older transaction.
      284             :  *
      285             :  * Wound-Wait:
      286             :  *   A newer transaction is wounded when:
      287             :  *     an older transaction makes a request for a lock being held by
      288             :  *     the newer transaction.
     289             :  */
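
Annotation (not part of mutex.c): both schemes are exposed through the ww_mutex API. The sketch below is illustrative only and uses hypothetical names (my_ww_class, struct my_obj, lock_pair); it follows the back-off pattern described in Documentation/locking/ww-mutex-design.rst: a lock taken with nothing else held blocks rather than returning -EDEADLK, and a younger transaction that does get -EDEADLK drops everything, waits for the contended lock with ww_mutex_lock_slow(), and retries in the new order.

        #include <linux/ww_mutex.h>

        static DEFINE_WW_CLASS(my_ww_class);

        struct my_obj {
                struct ww_mutex lock;
        };

        static void lock_pair(struct my_obj *want, struct my_obj *other)
        {
                struct ww_acquire_ctx ctx;
                struct my_obj *tmp;
                int ret;

                ww_acquire_init(&ctx, &my_ww_class);

                ret = ww_mutex_lock(&want->lock, &ctx); /* nothing held yet: cannot die */

                while ((ret = ww_mutex_lock(&other->lock, &ctx)) == -EDEADLK) {
                        /*
                         * We are the younger transaction: back off completely,
                         * sleep until the contended lock is free, then retry
                         * the remaining lock with the roles swapped.
                         */
                        ww_mutex_unlock(&want->lock);
                        ww_mutex_lock_slow(&other->lock, &ctx);
                        tmp = want;
                        want = other;
                        other = tmp;
                }

                ww_acquire_done(&ctx);

                /* ... both locks held; do the work ... */

                ww_mutex_unlock(&want->lock);
                ww_mutex_unlock(&other->lock);
                ww_acquire_fini(&ctx);
        }
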
     290             : 
     291             : /*
     292             :  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
     293             :  * it.
     294             :  */
     295             : static __always_inline void
     296           0 : ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
     297             : {
     298             : #ifdef CONFIG_DEBUG_MUTEXES
     299             :         /*
     300             :          * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
     301             :          * but released with a normal mutex_unlock in this call.
     302             :          *
     303             :          * This should never happen, always use ww_mutex_unlock.
     304             :          */
     305           0 :         DEBUG_LOCKS_WARN_ON(ww->ctx);
     306             : 
     307             :         /*
      308             :          * Not quite done after calling ww_acquire_done()?
     309             :          */
     310           0 :         DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
     311             : 
     312           0 :         if (ww_ctx->contending_lock) {
     313             :                 /*
     314             :                  * After -EDEADLK you tried to
     315             :                  * acquire a different ww_mutex? Bad!
     316             :                  */
     317           0 :                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
     318             : 
     319             :                 /*
     320             :                  * You called ww_mutex_lock after receiving -EDEADLK,
     321             :                  * but 'forgot' to unlock everything else first?
     322             :                  */
     323           0 :                 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
     324           0 :                 ww_ctx->contending_lock = NULL;
     325             :         }
     326             : 
     327             :         /*
     328             :          * Naughty, using a different class will lead to undefined behavior!
     329             :          */
     330           0 :         DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
     331             : #endif
     332           0 :         ww_ctx->acquired++;
     333           0 :         ww->ctx = ww_ctx;
     334           0 : }
     335             : 
     336             : /*
     337             :  * Determine if context @a is 'after' context @b. IOW, @a is a younger
     338             :  * transaction than @b and depending on algorithm either needs to wait for
     339             :  * @b or die.
     340             :  */
     341             : static inline bool __sched
     342           0 : __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
     343             : {
     344             : 
     345           0 :         return (signed long)(a->stamp - b->stamp) > 0;
     346             : }
     347             : 
     348             : /*
     349             :  * Wait-Die; wake a younger waiter context (when locks held) such that it can
     350             :  * die.
     351             :  *
     352             :  * Among waiters with context, only the first one can have other locks acquired
     353             :  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
     354             :  * __ww_mutex_check_kill() wake any but the earliest context.
     355             :  */
     356             : static bool __sched
     357           0 : __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
     358             :                struct ww_acquire_ctx *ww_ctx)
     359             : {
     360           0 :         if (!ww_ctx->is_wait_die)
     361             :                 return false;
     362             : 
     363           0 :         if (waiter->ww_ctx->acquired > 0 &&
     364           0 :                         __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
     365           0 :                 debug_mutex_wake_waiter(lock, waiter);
     366           0 :                 wake_up_process(waiter->task);
     367             :         }
     368             : 
     369             :         return true;
     370             : }
     371             : 
     372             : /*
     373             :  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
     374             :  *
     375             :  * Wound the lock holder if there are waiters with older transactions than
      376             :  * the lock holder's. Even if multiple waiters may wound the lock holder,
     377             :  * it's sufficient that only one does.
     378             :  */
     379           0 : static bool __ww_mutex_wound(struct mutex *lock,
     380             :                              struct ww_acquire_ctx *ww_ctx,
     381             :                              struct ww_acquire_ctx *hold_ctx)
     382             : {
     383           0 :         struct task_struct *owner = __mutex_owner(lock);
     384             : 
     385           0 :         lockdep_assert_held(&lock->wait_lock);
     386             : 
     387             :         /*
     388             :          * Possible through __ww_mutex_add_waiter() when we race with
     389             :          * ww_mutex_set_context_fastpath(). In that case we'll get here again
     390             :          * through __ww_mutex_check_waiters().
     391             :          */
     392           0 :         if (!hold_ctx)
     393             :                 return false;
     394             : 
     395             :         /*
     396             :          * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
     397             :          * it cannot go away because we'll have FLAG_WAITERS set and hold
     398             :          * wait_lock.
     399             :          */
     400           0 :         if (!owner)
     401             :                 return false;
     402             : 
     403           0 :         if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
     404           0 :                 hold_ctx->wounded = 1;
     405             : 
     406             :                 /*
     407             :                  * wake_up_process() paired with set_current_state()
     408             :                  * inserts sufficient barriers to make sure @owner either sees
     409             :                  * it's wounded in __ww_mutex_check_kill() or has a
     410             :                  * wakeup pending to re-read the wounded state.
     411             :                  */
     412           0 :                 if (owner != current)
     413           0 :                         wake_up_process(owner);
     414             : 
     415           0 :                 return true;
     416             :         }
     417             : 
     418             :         return false;
     419             : }
     420             : 
     421             : /*
      422             :  * We just acquired @lock under @ww_ctx. If there are later contexts waiting
     423             :  * behind us on the wait-list, check if they need to die, or wound us.
     424             :  *
     425             :  * See __ww_mutex_add_waiter() for the list-order construction; basically the
     426             :  * list is ordered by stamp, smallest (oldest) first.
     427             :  *
      428             :  * This relies on never mixing wait-die/wound-wait on the same wait-list,
     429             :  * which is currently ensured by that being a ww_class property.
     430             :  *
     431             :  * The current task must not be on the wait list.
     432             :  */
     433             : static void __sched
     434           0 : __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
     435             : {
     436           0 :         struct mutex_waiter *cur;
     437             : 
     438           0 :         lockdep_assert_held(&lock->wait_lock);
     439             : 
     440           0 :         list_for_each_entry(cur, &lock->wait_list, list) {
     441           0 :                 if (!cur->ww_ctx)
     442           0 :                         continue;
     443             : 
     444           0 :                 if (__ww_mutex_die(lock, cur, ww_ctx) ||
     445           0 :                     __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
     446             :                         break;
     447             :         }
     448           0 : }
     449             : 
     450             : /*
      451             :  * After acquiring the lock via the fastpath, where we do not hold wait_lock, set ctx
     452             :  * and wake up any waiters so they can recheck.
     453             :  */
     454             : static __always_inline void
     455           0 : ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     456             : {
     457           0 :         ww_mutex_lock_acquired(lock, ctx);
     458             : 
     459             :         /*
     460             :          * The lock->ctx update should be visible on all cores before
     461             :          * the WAITERS check is done, otherwise contended waiters might be
      462             :  * missed. A contended waiter will either see ww_ctx == NULL
      463             :  * and keep spinning, or it will acquire wait_lock, add itself
      464             :  * to the waiter list and sleep.
     465             :          */
     466           0 :         smp_mb(); /* See comments above and below. */
     467             : 
     468             :         /*
     469             :          * [W] ww->ctx = ctx     [W] MUTEX_FLAG_WAITERS
     470             :          *     MB                       MB
     471             :          * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
     472             :          *
     473             :          * The memory barrier above pairs with the memory barrier in
     474             :          * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
     475             :          * and/or !empty list.
     476             :          */
     477           0 :         if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
     478             :                 return;
     479             : 
     480             :         /*
     481             :          * Uh oh, we raced in fastpath, check if any of the waiters need to
     482             :          * die or wound us.
     483             :          */
     484           0 :         spin_lock(&lock->base.wait_lock);
     485           0 :         __ww_mutex_check_waiters(&lock->base, ctx);
     486           0 :         spin_unlock(&lock->base.wait_lock);
     487             : }
     488             : 
     489             : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
     490             : 
     491             : static inline
     492           0 : bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     493             :                             struct mutex_waiter *waiter)
     494             : {
     495           0 :         struct ww_mutex *ww;
     496             : 
     497           0 :         ww = container_of(lock, struct ww_mutex, base);
     498             : 
     499             :         /*
      500             :  * If ww->ctx is set, its contents are undefined; only
      501             :  * by acquiring wait_lock is there a guarantee that
      502             :  * they are not invalid when read.
     503             :          *
     504             :          * As such, when deadlock detection needs to be
     505             :          * performed the optimistic spinning cannot be done.
     506             :          *
     507             :          * Check this in every inner iteration because we may
     508             :          * be racing against another thread's ww_mutex_lock.
     509             :          */
     510           0 :         if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
     511             :                 return false;
     512             : 
     513             :         /*
     514             :          * If we aren't on the wait list yet, cancel the spin
      515             :  * if there are waiters. We want to avoid stealing the
     516             :          * lock from a waiter with an earlier stamp, since the
     517             :          * other thread may already own a lock that we also
     518             :          * need.
     519             :          */
     520           0 :         if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
     521             :                 return false;
     522             : 
     523             :         /*
     524             :          * Similarly, stop spinning if we are no longer the
     525             :          * first waiter.
     526             :          */
     527           0 :         if (waiter && !__mutex_waiter_is_first(lock, waiter))
     528           0 :                 return false;
     529             : 
     530             :         return true;
     531             : }
     532             : 
     533             : /*
     534             :  * Look out! "owner" is an entirely speculative pointer access and not
     535             :  * reliable.
     536             :  *
     537             :  * "noinline" so that this function shows up on perf profiles.
     538             :  */
     539             : static noinline
     540         221 : bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
     541             :                          struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
     542             : {
     543         221 :         bool ret = true;
     544             : 
     545         221 :         rcu_read_lock();
     546      113892 :         while (__mutex_owner(lock) == owner) {
     547             :                 /*
      548             :                  * Ensure we emit the owner->on_cpu dereference _after_
     549             :                  * checking lock->owner still matches owner. If that fails,
     550             :                  * owner might point to freed memory. If it still matches,
     551             :                  * the rcu_read_lock() ensures the memory stays valid.
     552             :                  */
     553      113680 :                 barrier();
     554             : 
     555             :                 /*
      556             :                  * Use vcpu_is_preempted() to detect the lock holder preemption issue.
     557             :                  */
     558      341034 :                 if (!owner->on_cpu || need_resched() ||
     559      113676 :                                 vcpu_is_preempted(task_cpu(owner))) {
     560             :                         ret = false;
     561             :                         break;
     562             :                 }
     563             : 
     564      113671 :                 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
     565             :                         ret = false;
     566             :                         break;
     567             :                 }
     568             : 
     569      113671 :                 cpu_relax();
     570             :         }
     571         221 :         rcu_read_unlock();
     572             : 
     573         221 :         return ret;
     574             : }
     575             : 
     576             : /*
     577             :  * Initial check for entering the mutex spinning loop
     578             :  */
     579         612 : static inline int mutex_can_spin_on_owner(struct mutex *lock)
     580             : {
     581         612 :         struct task_struct *owner;
     582         612 :         int retval = 1;
     583             : 
     584         612 :         if (need_resched())
     585             :                 return 0;
     586             : 
     587         603 :         rcu_read_lock();
     588         603 :         owner = __mutex_owner(lock);
     589             : 
     590             :         /*
      591             :          * To deal with the lock holder preemption issue, skip spinning if the
      592             :          * owner task is not running on a CPU or its CPU is preempted.
     593             :          */
     594         603 :         if (owner)
     595         272 :                 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
     596         603 :         rcu_read_unlock();
     597             : 
     598             :         /*
     599             :          * If lock->owner is not set, the mutex has been released. Return true
     600             :          * such that we'll trylock in the spin path, which is a faster option
     601             :          * than the blocking slow path.
     602             :          */
     603         603 :         return retval;
     604             : }
     605             : 
     606             : /*
     607             :  * Optimistic spinning.
     608             :  *
     609             :  * We try to spin for acquisition when we find that the lock owner
     610             :  * is currently running on a (different) CPU and while we don't
     611             :  * need to reschedule. The rationale is that if the lock owner is
     612             :  * running, it is likely to release the lock soon.
     613             :  *
      614             :  * The mutex spinners are queued up using an MCS lock so that only one
     615             :  * spinner can compete for the mutex. However, if mutex spinning isn't
     616             :  * going to happen, there is no point in going through the lock/unlock
     617             :  * overhead.
     618             :  *
     619             :  * Returns true when the lock was taken, otherwise false, indicating
     620             :  * that we need to jump to the slowpath and sleep.
     621             :  *
     622             :  * The waiter flag is set to true if the spinner is a waiter in the wait
     623             :  * queue. The waiter-spinner will spin on the lock directly and concurrently
     624             :  * with the spinner at the head of the OSQ, if present, until the owner is
     625             :  * changed to itself.
     626             :  */
     627             : static __always_inline bool
     628         612 : mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     629             :                       const bool use_ww_ctx, struct mutex_waiter *waiter)
     630             : {
     631         612 :         if (!waiter) {
     632             :                 /*
     633             :                  * The purpose of the mutex_can_spin_on_owner() function is
     634             :                  * to eliminate the overhead of osq_lock() and osq_unlock()
     635             :                  * in case spinning isn't possible. As a waiter-spinner
     636             :                  * is not going to take OSQ lock anyway, there is no need
     637             :                  * to call mutex_can_spin_on_owner().
     638             :                  */
     639         612 :                 if (!mutex_can_spin_on_owner(lock))
     640          24 :                         goto fail;
     641             : 
     642             :                 /*
     643             :                  * In order to avoid a stampede of mutex spinners trying to
      644             :                  * acquire the mutex all at once, the spinners need to take an
     645             :                  * MCS (queued) lock first before spinning on the owner field.
     646             :                  */
     647         588 :                 if (!osq_lock(&lock->osq))
     648           0 :                         goto fail;
     649             :         }
     650             : 
     651         800 :         for (;;) {
     652         800 :                 struct task_struct *owner;
     653             : 
     654             :                 /* Try to acquire the mutex... */
     655         800 :                 owner = __mutex_trylock_or_owner(lock);
     656         800 :                 if (!owner)
     657             :                         break;
     658             : 
     659             :                 /*
     660             :                  * There's an owner, wait for it to either
     661             :                  * release the lock or go to sleep.
     662             :                  */
     663         221 :                 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
     664           9 :                         goto fail_unlock;
     665             : 
     666             :                 /*
     667             :                  * The cpu_relax() call is a compiler barrier which forces
     668             :                  * everything in this loop to be re-loaded. We don't need
     669             :                  * memory barriers as we'll eventually observe the right
     670             :                  * values at the cost of a few extra spins.
     671             :                  */
     672         212 :                 cpu_relax();
     673             :         }
     674             : 
     675         579 :         if (!waiter)
     676         579 :                 osq_unlock(&lock->osq);
     677             : 
     678             :         return true;
     679             : 
     680             : 
     681           9 : fail_unlock:
     682           9 :         if (!waiter)
     683           9 :                 osq_unlock(&lock->osq);
     684             : 
     685          33 : fail:
     686             :         /*
     687             :          * If we fell out of the spin path because of need_resched(),
     688             :          * reschedule now, before we try-lock the mutex. This avoids getting
     689             :          * scheduled out right after we obtained the mutex.
     690             :          */
     691          33 :         if (need_resched()) {
     692             :                 /*
     693             :                  * We _should_ have TASK_RUNNING here, but just in case
     694             :                  * we do not, make it so, otherwise we might get stuck.
     695             :                  */
     696          11 :                 __set_current_state(TASK_RUNNING);
     697          11 :                 schedule_preempt_disabled();
     698             :         }
     699             : 
     700             :         return false;
     701             : }
     702             : #else
     703             : static __always_inline bool
     704             : mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
     705             :                       const bool use_ww_ctx, struct mutex_waiter *waiter)
     706             : {
     707             :         return false;
     708             : }
     709             : #endif
     710             : 
     711             : static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
     712             : 
     713             : /**
     714             :  * mutex_unlock - release the mutex
     715             :  * @lock: the mutex to be released
     716             :  *
     717             :  * Unlock a mutex that has been locked by this task previously.
     718             :  *
     719             :  * This function must not be used in interrupt context. Unlocking
      720             :  * of a mutex that is not locked is not allowed.
     721             :  *
     722             :  * This function is similar to (but not equivalent to) up().
     723             :  */
     724      128348 : void __sched mutex_unlock(struct mutex *lock)
     725             : {
     726             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     727             :         if (__mutex_unlock_fast(lock))
     728             :                 return;
     729             : #endif
     730      128348 :         __mutex_unlock_slowpath(lock, _RET_IP_);
     731      128358 : }
     732             : EXPORT_SYMBOL(mutex_unlock);
     733             : 
     734             : /**
     735             :  * ww_mutex_unlock - release the w/w mutex
     736             :  * @lock: the mutex to be released
     737             :  *
     738             :  * Unlock a mutex that has been locked by this task previously with any of the
     739             :  * ww_mutex_lock* functions (with or without an acquire context). It is
     740             :  * forbidden to release the locks after releasing the acquire context.
     741             :  *
     742             :  * This function must not be used in interrupt context. Unlocking
      743             :  * of an unlocked mutex is not allowed.
     744             :  */
     745           0 : void __sched ww_mutex_unlock(struct ww_mutex *lock)
     746             : {
     747             :         /*
     748             :          * The unlocking fastpath is the 0->1 transition from 'locked'
     749             :          * into 'unlocked' state:
     750             :          */
     751           0 :         if (lock->ctx) {
     752             : #ifdef CONFIG_DEBUG_MUTEXES
     753           0 :                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
     754             : #endif
     755           0 :                 if (lock->ctx->acquired > 0)
     756           0 :                         lock->ctx->acquired--;
     757           0 :                 lock->ctx = NULL;
     758             :         }
     759             : 
     760           0 :         mutex_unlock(&lock->base);
     761           0 : }
     762             : EXPORT_SYMBOL(ww_mutex_unlock);
     763             : 
     764             : 
     765             : static __always_inline int __sched
     766           0 : __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
     767             : {
     768           0 :         if (ww_ctx->acquired > 0) {
     769             : #ifdef CONFIG_DEBUG_MUTEXES
     770           0 :                 struct ww_mutex *ww;
     771             : 
     772           0 :                 ww = container_of(lock, struct ww_mutex, base);
     773           0 :                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
     774           0 :                 ww_ctx->contending_lock = ww;
     775             : #endif
     776           0 :                 return -EDEADLK;
     777             :         }
     778             : 
     779             :         return 0;
     780             : }
     781             : 
     782             : 
     783             : /*
     784             :  * Check the wound condition for the current lock acquire.
     785             :  *
     786             :  * Wound-Wait: If we're wounded, kill ourself.
     787             :  *
     788             :  * Wait-Die: If we're trying to acquire a lock already held by an older
     789             :  *           context, kill ourselves.
     790             :  *
     791             :  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
     792             :  * look at waiters before us in the wait-list.
     793             :  */
     794             : static inline int __sched
     795           0 : __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
     796             :                       struct ww_acquire_ctx *ctx)
     797             : {
     798           0 :         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
     799           0 :         struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
     800           0 :         struct mutex_waiter *cur;
     801             : 
     802           0 :         if (ctx->acquired == 0)
     803             :                 return 0;
     804             : 
     805           0 :         if (!ctx->is_wait_die) {
     806           0 :                 if (ctx->wounded)
     807           0 :                         return __ww_mutex_kill(lock, ctx);
     808             : 
     809             :                 return 0;
     810             :         }
     811             : 
     812           0 :         if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
     813           0 :                 return __ww_mutex_kill(lock, ctx);
     814             : 
     815             :         /*
     816             :          * If there is a waiter in front of us that has a context, then its
     817             :          * stamp is earlier than ours and we must kill ourself.
     818             :          */
     819           0 :         cur = waiter;
     820           0 :         list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
     821           0 :                 if (!cur->ww_ctx)
     822           0 :                         continue;
     823             : 
     824           0 :                 return __ww_mutex_kill(lock, ctx);
     825             :         }
     826             : 
     827             :         return 0;
     828             : }
     829             : 
     830             : /*
      831             :  * Add @waiter to the wait-list, keeping the wait-list ordered by stamp, smallest
      832             :  * first, such that older contexts are preferred to acquire the lock over
     833             :  * younger contexts.
     834             :  *
     835             :  * Waiters without context are interspersed in FIFO order.
     836             :  *
      837             :  * Furthermore, for Wait-Die, kill ourself immediately when possible (there are
      838             :  * older contexts already waiting) to avoid unnecessary waiting; for
      839             :  * Wound-Wait, ensure we wound the owning context when it is younger.
     840             :  */
     841             : static inline int __sched
     842           0 : __ww_mutex_add_waiter(struct mutex_waiter *waiter,
     843             :                       struct mutex *lock,
     844             :                       struct ww_acquire_ctx *ww_ctx)
     845             : {
     846           0 :         struct mutex_waiter *cur;
     847           0 :         struct list_head *pos;
     848           0 :         bool is_wait_die;
     849             : 
     850           0 :         if (!ww_ctx) {
     851           0 :                 __mutex_add_waiter(lock, waiter, &lock->wait_list);
     852           0 :                 return 0;
     853             :         }
     854             : 
     855           0 :         is_wait_die = ww_ctx->is_wait_die;
     856             : 
     857             :         /*
     858             :          * Add the waiter before the first waiter with a higher stamp.
     859             :          * Waiters without a context are skipped to avoid starving
     860             :          * them. Wait-Die waiters may die here. Wound-Wait waiters
     861             :          * never die here, but they are sorted in stamp order and
     862             :          * may wound the lock holder.
     863             :          */
     864           0 :         pos = &lock->wait_list;
     865           0 :         list_for_each_entry_reverse(cur, &lock->wait_list, list) {
     866           0 :                 if (!cur->ww_ctx)
     867           0 :                         continue;
     868             : 
     869           0 :                 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
     870             :                         /*
     871             :                          * Wait-Die: if we find an older context waiting, there
     872             :                          * is no point in queueing behind it, as we'd have to
     873             :                          * die the moment it would acquire the lock.
     874             :                          */
     875           0 :                         if (is_wait_die) {
     876           0 :                                 int ret = __ww_mutex_kill(lock, ww_ctx);
     877             : 
     878           0 :                                 if (ret)
     879           0 :                                         return ret;
     880             :                         }
     881             : 
     882             :                         break;
     883             :                 }
     884             : 
     885           0 :                 pos = &cur->list;
     886             : 
     887             :                 /* Wait-Die: ensure younger waiters die. */
     888           0 :                 __ww_mutex_die(lock, cur, ww_ctx);
     889             :         }
     890             : 
     891           0 :         __mutex_add_waiter(lock, waiter, pos);
     892             : 
     893             :         /*
     894             :          * Wound-Wait: if we're blocking on a mutex owned by a younger context,
      895             :          * wound it such that we might proceed.
     896             :          */
     897           0 :         if (!is_wait_die) {
     898           0 :                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
     899             : 
     900             :                 /*
     901             :                  * See ww_mutex_set_context_fastpath(). Orders setting
     902             :                  * MUTEX_FLAG_WAITERS vs the ww->ctx load,
     903             :                  * such that either we or the fastpath will wound @ww->ctx.
     904             :                  */
     905           0 :                 smp_mb();
     906           0 :                 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
     907             :         }
     908             : 
     909             :         return 0;
     910             : }
     911             : 
     912             : /*
     913             :  * Lock a mutex (possibly interruptible), slowpath:
     914             :  */
     915             : static __always_inline int __sched
     916      128068 : __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
     917             :                     struct lockdep_map *nest_lock, unsigned long ip,
     918             :                     struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
     919             : {
     920      128068 :         struct mutex_waiter waiter;
     921      128068 :         bool first = false;
     922      128068 :         struct ww_mutex *ww;
     923      128068 :         int ret;
     924             : 
     925      256142 :         might_sleep();
     926             : 
     927             : #ifdef CONFIG_DEBUG_MUTEXES
     928      128074 :         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
     929             : #endif
     930             : 
     931      128074 :         ww = container_of(lock, struct ww_mutex, base);
     932           0 :         if (use_ww_ctx && ww_ctx) {
     933           0 :                 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
     934             :                         return -EALREADY;
     935             : 
     936             :                 /*
     937             :                  * Reset the wounded flag after a kill. No other process can
     938             :                  * race and wound us here since they can't have a valid owner
     939             :                  * pointer if we don't have any locks held.
     940             :                  */
     941           0 :                 if (ww_ctx->acquired == 0)
     942           0 :                         ww_ctx->wounded = 0;
     943             :         }
     944             : 
     945      128074 :         preempt_disable();
     946      128074 :         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
     947             : 
     948      128062 :         if (__mutex_trylock(lock) ||
     949        1202 :             mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
     950             :                 /* got the lock, yay! */
     951      128046 :                 lock_acquired(&lock->dep_map, ip);
     952           0 :                 if (use_ww_ctx && ww_ctx)
     953           0 :                         ww_mutex_set_context_fastpath(ww, ww_ctx);
     954      128046 :                 preempt_enable();
     955      128050 :                 return 0;
     956             :         }
     957             : 
     958          33 :         spin_lock(&lock->wait_lock);
     959             :         /*
     960             :          * After waiting to acquire the wait_lock, try again.
     961             :          */
     962          33 :         if (__mutex_trylock(lock)) {
     963           0 :                 if (use_ww_ctx && ww_ctx)
     964           0 :                         __ww_mutex_check_waiters(lock, ww_ctx);
     965             : 
     966          10 :                 goto skip_wait;
     967             :         }
     968             : 
     969          23 :         debug_mutex_lock_common(lock, &waiter);
     970             : 
     971          23 :         lock_contended(&lock->dep_map, ip);
     972             : 
     973          23 :         if (!use_ww_ctx) {
     974             :                 /* add waiting tasks to the end of the waitqueue (FIFO): */
     975          23 :                 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
     976             : 
     977             : 
     978             : #ifdef CONFIG_DEBUG_MUTEXES
     979          23 :                 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
     980             : #endif
     981             :         } else {
     982             :                 /*
     983             :                  * Add in stamp order, waking up waiters that must kill
     984             :                  * themselves.
     985             :                  */
     986           0 :                 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
     987           0 :                 if (ret)
     988           0 :                         goto err_early_kill;
     989             : 
     990           0 :                 waiter.ww_ctx = ww_ctx;
     991             :         }
     992             : 
     993          23 :         waiter.task = current;
     994             : 
     995          23 :         set_current_state(state);
     996          23 :         for (;;) {
     997             :                 /*
     998             :                  * Once we hold wait_lock, we're serialized against
     999             :                  * mutex_unlock() handing the lock off to us; do a trylock
    1000             :                  * before testing the error conditions to make sure we pick up
    1001             :                  * the handoff.
    1002             :                  */
    1003          23 :                 if (__mutex_trylock(lock))
    1004           1 :                         goto acquired;
    1005             : 
    1006             :                 /*
    1007             :                  * Check for signals and kill conditions while holding
    1008             :                  * wait_lock. This ensures the lock cancellation is ordered
    1009             :                  * against mutex_unlock() and wake-ups do not go missing.
    1010             :                  */
    1011          22 :                 if (signal_pending_state(state, current)) {
    1012           0 :                         ret = -EINTR;
    1013           0 :                         goto err;
    1014             :                 }
    1015             : 
    1016           0 :                 if (use_ww_ctx && ww_ctx) {
    1017           0 :                         ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
    1018           0 :                         if (ret)
    1019           0 :                                 goto err;
    1020             :                 }
    1021             : 
    1022          22 :                 spin_unlock(&lock->wait_lock);
    1023          22 :                 schedule_preempt_disabled();
    1024             : 
    1025             :                 /*
    1026             :                  * ww_mutex needs to always recheck its position since its waiter
    1027             :                  * list is not FIFO ordered.
    1028             :                  */
    1029          22 :                 if ((use_ww_ctx && ww_ctx) || !first) {
    1030          22 :                         first = __mutex_waiter_is_first(lock, &waiter);
    1031          22 :                         if (first)
    1032          22 :                                 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
    1033             :                 }
    1034             : 
    1035          22 :                 set_current_state(state);
    1036             :                 /*
    1037             :                  * Here we order against unlock; we must either see it change
    1038             :                  * state back to RUNNING and fall through the next schedule(),
    1039             :                  * or we must see its unlock and acquire.
    1040             :                  */
    1041          22 :                 if (__mutex_trylock(lock) ||
    1042           0 :                     (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
    1043             :                         break;
    1044             : 
    1045          23 :                 spin_lock(&lock->wait_lock);
    1046             :         }
    1047          22 :         spin_lock(&lock->wait_lock);
    1048          23 : acquired:
    1049          23 :         __set_current_state(TASK_RUNNING);
    1050             : 
    1051           0 :         if (use_ww_ctx && ww_ctx) {
    1052             :                 /*
    1053             :                  * Wound-Wait; we stole the lock (!first_waiter), check the
    1054             :                  * waiters as anyone might want to wound us.
    1055             :                  */
    1056           0 :                 if (!ww_ctx->is_wait_die &&
    1057           0 :                     !__mutex_waiter_is_first(lock, &waiter))
    1058           0 :                         __ww_mutex_check_waiters(lock, ww_ctx);
    1059             :         }
    1060             : 
    1061          23 :         mutex_remove_waiter(lock, &waiter, current);
    1062          23 :         if (likely(list_empty(&lock->wait_list)))
    1063          23 :                 __mutex_clear_flag(lock, MUTEX_FLAGS);
    1064             : 
    1065          23 :         debug_mutex_free_waiter(&waiter);
    1066             : 
    1067          33 : skip_wait:
    1068             :         /* got the lock - cleanup and rejoice! */
    1069          33 :         lock_acquired(&lock->dep_map, ip);
    1070             : 
    1071           0 :         if (use_ww_ctx && ww_ctx)
    1072           0 :                 ww_mutex_lock_acquired(ww, ww_ctx);
    1073             : 
    1074          33 :         spin_unlock(&lock->wait_lock);
    1075          33 :         preempt_enable();
    1076          33 :         return 0;
    1077             : 
    1078           0 : err:
    1079           0 :         __set_current_state(TASK_RUNNING);
    1080           0 :         mutex_remove_waiter(lock, &waiter, current);
    1081           0 : err_early_kill:
    1082           0 :         spin_unlock(&lock->wait_lock);
    1083           0 :         debug_mutex_free_waiter(&waiter);
    1084           0 :         mutex_release(&lock->dep_map, ip);
    1085           0 :         preempt_enable();
    1086           0 :         return ret;
    1087             : }
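
/*
 * A minimal sketch (not part of mutex.c) of the "set state before the
 * final ownership test" ordering that the wait loop above relies on,
 * expressed with the generic waitqueue API and with the mutex-specific
 * wait_list/handoff bookkeeping omitted.  my_waitq and resource_free()
 * are hypothetical stand-ins: the waker is assumed to make the condition
 * true and then call wake_up(&my_waitq), mirroring how mutex_unlock()
 * clears the owner and then wakes the first waiter.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

static void wait_for_resource_sketch(bool (*resource_free)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* Register as a waiter and go !RUNNING before testing ... */
		prepare_to_wait(&my_waitq, &wait, TASK_UNINTERRUPTIBLE);
		/* ... so the waker's wake_up() cannot get lost in between. */
		if (resource_free())
			break;
		schedule();
	}
	finish_wait(&my_waitq, &wait);
}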
    1088             : 
    1089             : static int __sched
    1090      128067 : __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
    1091             :              struct lockdep_map *nest_lock, unsigned long ip)
    1092             : {
    1093      128067 :         return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
    1094             : }
    1095             : 
    1096             : static int __sched
    1097           0 : __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
    1098             :                 struct lockdep_map *nest_lock, unsigned long ip,
    1099             :                 struct ww_acquire_ctx *ww_ctx)
    1100             : {
    1101           0 :         return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
    1102             : }
    1103             : 
    1104             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
    1105             : void __sched
    1106      124027 : mutex_lock_nested(struct mutex *lock, unsigned int subclass)
    1107             : {
    1108      124027 :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
    1109      124043 : }
    1111             : EXPORT_SYMBOL_GPL(mutex_lock_nested);
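
/*
 * A hedged usage sketch for the subclass annotation: taking two mutexes
 * of the same lock class (e.g. two instances of the same structure) in a
 * stable order without triggering a false lockdep "possible recursive
 * locking" report.  lock_two_sketch() and the address-based ordering are
 * illustrative assumptions, not something mutex.c mandates.
 */
static void lock_two_sketch(struct mutex *a, struct mutex *b)
{
	if (a > b)
		swap(a, b);
	mutex_lock(a);					/* subclass 0 */
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);	/* subclass 1 */
}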
    1112             : 
    1113             : void __sched
    1114           0 : _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
    1115             : {
    1116           0 :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
    1117           0 : }
    1118             : EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
    1119             : 
    1120             : int __sched
    1121        2206 : mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
    1122             : {
    1123        2206 :         return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
    1124             : }
    1125             : EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
    1126             : 
    1127             : int __sched
    1128        1833 : mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
    1129             : {
    1130        1833 :         return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
    1131             : }
    1132             : EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
    1133             : 
    1134             : void __sched
    1135           1 : mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
    1136             : {
    1137           1 :         int token;
    1138             : 
    1139           1 :         might_sleep();
    1140             : 
    1141           1 :         token = io_schedule_prepare();
    1142           1 :         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
    1143           1 :                             subclass, NULL, _RET_IP_, NULL, 0);
    1144           1 :         io_schedule_finish(token);
    1145           1 : }
    1146             : EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
    1147             : 
    1148             : static inline int
    1149           0 : ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1150             : {
    1151             : #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
    1152           0 :         unsigned tmp;
    1153             : 
    1154           0 :         if (ctx->deadlock_inject_countdown-- == 0) {
    1155           0 :                 tmp = ctx->deadlock_inject_interval;
    1156           0 :                 if (tmp > UINT_MAX/4)
    1157             :                         tmp = UINT_MAX;
    1158             :                 else
    1159           0 :                         tmp = tmp*2 + tmp + tmp/2;
    1160             : 
    1161           0 :                 ctx->deadlock_inject_interval = tmp;
    1162           0 :                 ctx->deadlock_inject_countdown = tmp;
    1163           0 :                 ctx->contending_lock = lock;
    1164             : 
    1165           0 :                 ww_mutex_unlock(lock);
    1166             : 
    1167           0 :                 return -EDEADLK;
    1168             :         }
    1169             : #endif
    1170             : 
    1171             :         return 0;
    1172             : }
    1173             : 
    1174             : int __sched
    1175           0 : ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1176             : {
    1177           0 :         int ret;
    1178             : 
    1179           0 :         might_sleep();
    1180           0 :         ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
    1181           0 :                                0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
    1182             :                                ctx);
    1183           0 :         if (!ret && ctx && ctx->acquired > 1)
    1184           0 :                 return ww_mutex_deadlock_injection(lock, ctx);
    1185             : 
    1186             :         return ret;
    1187             : }
    1188             : EXPORT_SYMBOL_GPL(ww_mutex_lock);
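
/*
 * A hedged sketch of the acquire/backoff protocol behind these entry
 * points (see Documentation/locking/ww-mutex-design.rst).  The class,
 * helper name and two-lock shape are assumptions for the example, and
 * both ww_mutexes are assumed to have been ww_mutex_init()'ed with
 * example_ww_class.  A real user would typically keep the contended lock
 * held after ww_mutex_lock_slow() and acquire the rest around it; this
 * sketch simply retries from scratch for brevity.
 */
static DEFINE_WW_CLASS(example_ww_class);

static int lock_both_sketch(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
retry:
	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto out_fini;

	ret = ww_mutex_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/* Lost the stamp race: back off, wait for b, then retry. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		ww_mutex_unlock(b);
		goto retry;
	}
	if (ret) {
		ww_mutex_unlock(a);
		goto out_fini;
	}

	ww_acquire_done(&ctx);
	/* ... both objects are locked here ... */
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}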
    1189             : 
    1190             : int __sched
    1191           0 : ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1192             : {
    1193           0 :         int ret;
    1194             : 
    1195           0 :         might_sleep();
    1196           0 :         ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
    1197           0 :                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
    1198             :                               ctx);
    1199             : 
    1200           0 :         if (!ret && ctx && ctx->acquired > 1)
    1201           0 :                 return ww_mutex_deadlock_injection(lock, ctx);
    1202             : 
    1203             :         return ret;
    1204             : }
    1205             : EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
    1206             : 
    1207             : #endif
    1208             : 
    1209             : /*
    1210             :  * Release the lock, slowpath:
    1211             :  */
    1212      128352 : static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
    1213             : {
    1214      128352 :         struct task_struct *next = NULL;
    1215      128352 :         DEFINE_WAKE_Q(wake_q);
    1216      128352 :         unsigned long owner;
    1217             : 
    1218      128352 :         mutex_release(&lock->dep_map, ip);
    1219             : 
    1220             :         /*
    1221             :          * Release the lock before (potentially) taking the spinlock such that
    1222             :          * other contenders can get on with things ASAP.
    1223             :          *
    1224             :          * Except when HANDOFF, in that case we must not clear the owner field,
    1225             :          * but instead set it to the top waiter.
    1226             :          */
    1227      128343 :         owner = atomic_long_read(&lock->owner);
    1228      128343 :         for (;;) {
    1229      128343 :                 unsigned long old;
    1230             : 
    1231             : #ifdef CONFIG_DEBUG_MUTEXES
    1232      128343 :                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
    1233      128343 :                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
    1234             : #endif
    1235             : 
    1236      128343 :                 if (owner & MUTEX_FLAG_HANDOFF)
    1237             :                         break;
    1238             : 
    1239      256700 :                 old = atomic_long_cmpxchg_release(&lock->owner, owner,
    1240      128343 :                                                   __owner_flags(owner));
    1241      128357 :                 if (old == owner) {
    1242      128357 :                         if (owner & MUTEX_FLAG_WAITERS)
    1243             :                                 break;
    1244             : 
    1245      128203 :                         return;
    1246             :                 }
    1247             : 
    1248             :                 owner = old;
    1249             :         }
    1250             : 
    1251         154 :         spin_lock(&lock->wait_lock);
    1252         154 :         debug_mutex_unlock(lock);
    1253         154 :         if (!list_empty(&lock->wait_list)) {
    1254             :                 /* get the first entry from the wait-list: */
    1255         154 :                 struct mutex_waiter *waiter =
    1256         154 :                         list_first_entry(&lock->wait_list,
    1257             :                                          struct mutex_waiter, list);
    1258             : 
    1259         154 :                 next = waiter->task;
    1260             : 
    1261         154 :                 debug_mutex_wake_waiter(lock, waiter);
    1262         154 :                 wake_q_add(&wake_q, next);
    1263             :         }
    1264             : 
    1265         154 :         if (owner & MUTEX_FLAG_HANDOFF)
    1266           0 :                 __mutex_handoff(lock, next);
    1267             : 
    1268         154 :         spin_unlock(&lock->wait_lock);
    1269             : 
    1270         154 :         wake_up_q(&wake_q);
    1271             : }
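
/*
 * A hedged sketch of the owner-release pattern above in isolation: clear
 * the owner word while preserving the low flag bits, unless a handoff
 * was requested, in which case ownership must instead be transferred to
 * the top waiter under wait_lock.  release_owner_sketch() is a
 * simplified stand-in; the real code also encodes the task_struct
 * pointer in the upper bits and feeds the result into the wake_q logic.
 */
static unsigned long release_owner_sketch(atomic_long_t *owner)
{
	unsigned long cur = atomic_long_read(owner);

	for (;;) {
		unsigned long flags = cur & MUTEX_FLAGS;
		unsigned long old;

		if (flags & MUTEX_FLAG_HANDOFF)
			return flags;	/* keep owner set for the top waiter */

		old = atomic_long_cmpxchg_release(owner, cur, flags);
		if (old == cur)
			return flags;	/* owner cleared, flags preserved */
		cur = old;		/* lost a race; re-evaluate */
	}
}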
    1272             : 
    1273             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
    1274             : /*
    1275             :  * Here come the less common (and hence less performance-critical) APIs:
    1276             :  * mutex_lock_interruptible() and mutex_trylock().
    1277             :  */
    1278             : static noinline int __sched
    1279             : __mutex_lock_killable_slowpath(struct mutex *lock);
    1280             : 
    1281             : static noinline int __sched
    1282             : __mutex_lock_interruptible_slowpath(struct mutex *lock);
    1283             : 
    1284             : /**
    1285             :  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
    1286             :  * @lock: The mutex to be acquired.
    1287             :  *
    1288             :  * Lock the mutex like mutex_lock().  If a signal is delivered while the
    1289             :  * process is sleeping, this function will return without acquiring the
    1290             :  * mutex.
    1291             :  *
    1292             :  * Context: Process context.
    1293             :  * Return: 0 if the lock was successfully acquired or %-EINTR if a
    1294             :  * signal arrived.
    1295             :  */
    1296             : int __sched mutex_lock_interruptible(struct mutex *lock)
    1297             : {
    1298             :         might_sleep();
    1299             : 
    1300             :         if (__mutex_trylock_fast(lock))
    1301             :                 return 0;
    1302             : 
    1303             :         return __mutex_lock_interruptible_slowpath(lock);
    1304             : }
    1306             : EXPORT_SYMBOL(mutex_lock_interruptible);
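
/*
 * A hedged caller sketch: a sleeping lock on a syscall path where a
 * pending signal should abort the wait.  struct my_dev and my_dev_wait()
 * are hypothetical, and returning -ERESTARTSYS on failure is a common
 * driver convention rather than something this API requires.
 */
struct my_dev {
	struct mutex lock;
	int value;
};

static int my_dev_wait(struct my_dev *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* a signal interrupted the wait */
	dev->value++;
	mutex_unlock(&dev->lock);
	return 0;
}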
    1307             : 
    1308             : /**
    1309             :  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
    1310             :  * @lock: The mutex to be acquired.
    1311             :  *
    1312             :  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
    1313             :  * the current process is delivered while the process is sleeping, this
    1314             :  * function will return without acquiring the mutex.
    1315             :  *
    1316             :  * Context: Process context.
    1317             :  * Return: 0 if the lock was successfully acquired or %-EINTR if a
    1318             :  * fatal signal arrived.
    1319             :  */
    1320             : int __sched mutex_lock_killable(struct mutex *lock)
    1321             : {
    1322             :         might_sleep();
    1323             : 
    1324             :         if (__mutex_trylock_fast(lock))
    1325             :                 return 0;
    1326             : 
    1327             :         return __mutex_lock_killable_slowpath(lock);
    1328             : }
    1329             : EXPORT_SYMBOL(mutex_lock_killable);
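
/*
 * A hedged caller sketch: like the interruptible variant, but only a
 * fatal signal (e.g. the task being OOM-killed) aborts the wait, so
 * ordinary signals cannot make the operation fail halfway.  struct my_sb
 * and my_sb_sync() are hypothetical.
 */
struct my_sb {
	struct mutex s_lock;
};

static int my_sb_sync(struct my_sb *sb)
{
	int err = mutex_lock_killable(&sb->s_lock);

	if (err)
		return err;		/* -EINTR: the task is being killed */
	/* ... write out state under s_lock ... */
	mutex_unlock(&sb->s_lock);
	return 0;
}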
    1330             : 
    1331             : /**
     1332             :  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O.
    1333             :  * @lock: The mutex to be acquired.
    1334             :  *
    1335             :  * Lock the mutex like mutex_lock().  While the task is waiting for this
    1336             :  * mutex, it will be accounted as being in the IO wait state by the
    1337             :  * scheduler.
    1338             :  *
    1339             :  * Context: Process context.
    1340             :  */
    1341             : void __sched mutex_lock_io(struct mutex *lock)
    1342             : {
    1343             :         int token;
    1344             : 
    1345             :         token = io_schedule_prepare();
    1346             :         mutex_lock(lock);
    1347             :         io_schedule_finish(token);
    1348             : }
    1349             : EXPORT_SYMBOL_GPL(mutex_lock_io);
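
/*
 * A hedged caller sketch: the current holder of flush_lock is expected
 * to be sleeping on block I/O, so time spent waiting for it is accounted
 * as iowait rather than idle.  struct my_cache and my_cache_flush() are
 * hypothetical.
 */
struct my_cache {
	struct mutex flush_lock;
};

static void my_cache_flush(struct my_cache *cache)
{
	mutex_lock_io(&cache->flush_lock);
	/* ... issue writeback and wait for completion ... */
	mutex_unlock(&cache->flush_lock);
}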
    1350             : 
    1351             : static noinline void __sched
    1352             : __mutex_lock_slowpath(struct mutex *lock)
    1353             : {
    1354             :         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
    1355             : }
    1356             : 
    1357             : static noinline int __sched
    1358             : __mutex_lock_killable_slowpath(struct mutex *lock)
    1359             : {
    1360             :         return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
    1361             : }
    1362             : 
    1363             : static noinline int __sched
    1364             : __mutex_lock_interruptible_slowpath(struct mutex *lock)
    1365             : {
    1366             :         return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
    1367             : }
    1368             : 
    1369             : static noinline int __sched
    1370             : __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1371             : {
    1372             :         return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
    1373             :                                _RET_IP_, ctx);
    1374             : }
    1375             : 
    1376             : static noinline int __sched
    1377             : __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
    1378             :                                             struct ww_acquire_ctx *ctx)
    1379             : {
    1380             :         return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
    1381             :                                _RET_IP_, ctx);
    1382             : }
    1383             : 
    1384             : #endif
    1385             : 
    1386             : /**
    1387             :  * mutex_trylock - try to acquire the mutex, without waiting
    1388             :  * @lock: the mutex to be acquired
    1389             :  *
    1390             :  * Try to acquire the mutex atomically. Returns 1 if the mutex
    1391             :  * has been acquired successfully, and 0 on contention.
    1392             :  *
    1393             :  * NOTE: this function follows the spin_trylock() convention, so
    1394             :  * it is negated from the down_trylock() return values! Be careful
    1395             :  * about this when converting semaphore users to mutexes.
    1396             :  *
    1397             :  * This function must not be used in interrupt context. The
    1398             :  * mutex must be released by the same task that acquired it.
    1399             :  */
    1400         279 : int __sched mutex_trylock(struct mutex *lock)
    1401             : {
    1402         279 :         bool locked;
    1403             : 
    1404             : #ifdef CONFIG_DEBUG_MUTEXES
    1405         279 :         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
    1406             : #endif
    1407             : 
    1408         279 :         locked = __mutex_trylock(lock);
    1409         279 :         if (locked)
    1410         279 :                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
    1411             : 
    1412         279 :         return locked;
    1413             : }
    1414             : EXPORT_SYMBOL(mutex_trylock);
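
/*
 * A hedged caller sketch illustrating the spin_trylock()-style return
 * convention (1 == acquired): skip optional work when someone else is
 * already doing it.  struct my_pool and my_pool_maybe_gc() are
 * hypothetical.
 */
struct my_pool {
	struct mutex gc_lock;
};

static void my_pool_maybe_gc(struct my_pool *pool)
{
	if (!mutex_trylock(&pool->gc_lock))
		return;			/* contended: another task is collecting */
	/* ... reclaim unused entries under gc_lock ... */
	mutex_unlock(&pool->gc_lock);
}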
    1415             : 
    1416             : #ifndef CONFIG_DEBUG_LOCK_ALLOC
    1417             : int __sched
    1418             : ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1419             : {
    1420             :         might_sleep();
    1421             : 
    1422             :         if (__mutex_trylock_fast(&lock->base)) {
    1423             :                 if (ctx)
    1424             :                         ww_mutex_set_context_fastpath(lock, ctx);
    1425             :                 return 0;
    1426             :         }
    1427             : 
    1428             :         return __ww_mutex_lock_slowpath(lock, ctx);
    1429             : }
    1430             : EXPORT_SYMBOL(ww_mutex_lock);
    1431             : 
    1432             : int __sched
    1433             : ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    1434             : {
    1435             :         might_sleep();
    1436             : 
    1437             :         if (__mutex_trylock_fast(&lock->base)) {
    1438             :                 if (ctx)
    1439             :                         ww_mutex_set_context_fastpath(lock, ctx);
    1440             :                 return 0;
    1441             :         }
    1442             : 
    1443             :         return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
    1444             : }
    1445             : EXPORT_SYMBOL(ww_mutex_lock_interruptible);
    1446             : 
    1447             : #endif
    1448             : 
     1449             : /**
     1450             :  * atomic_dec_and_mutex_lock - decrement the atomic and lock the mutex if it hits 0
     1451             :  * @cnt: the atomic counter to decrement
     1452             :  * @lock: the mutex to acquire and hold if the counter reaches 0
     1453             :  *
     1454             :  * Return: 1 with @lock held if @cnt was decremented to 0, 0 otherwise.
     1455             :  */
    1456           0 : int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
    1457             : {
    1458             :         /* dec if we can't possibly hit 0 */
    1459           0 :         if (atomic_add_unless(cnt, -1, 1))
    1460             :                 return 0;
    1461             :         /* we might hit 0, so take the lock */
    1462           0 :         mutex_lock(lock);
    1463           0 :         if (!atomic_dec_and_test(cnt)) {
    1464             :                 /* when we actually did the dec, we didn't hit 0 */
    1465           0 :                 mutex_unlock(lock);
    1466           0 :                 return 0;
    1467             :         }
    1468             :         /* we hit 0, and we hold the lock */
    1469             :         return 1;
    1470             : }
    1471             : EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
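
/*
 * A hedged caller sketch: drop a reference where freeing the object also
 * requires unlinking it from a mutex-protected list, without taking the
 * mutex on every non-final put.  my_obj, obj_list_lock and my_obj_put()
 * are hypothetical.
 */
static DEFINE_MUTEX(obj_list_lock);

struct my_obj {
	atomic_t refs;
	struct list_head node;
};

static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refs, &obj_list_lock))
		return;			/* not the last reference */
	list_del(&obj->node);
	mutex_unlock(&obj_list_lock);
	kfree(obj);
}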

Generated by: LCOV version 1.14