LCOV - code coverage report
Current view: top level - kernel/locking - osq_lock.c
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit      Total    Coverage
Lines:            31         58      53.4 %
Functions:         2          3      66.7 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/percpu.h>
       3             : #include <linux/sched.h>
       4             : #include <linux/osq_lock.h>
       5             : 
       6             : /*
       7             :  * An MCS-like lock especially tailored for optimistic spinning by sleeping
       8             :  * lock implementations (mutex, rwsem, etc.).
       9             :  *
      10             :  * Using a single MCS node per CPU is safe because sleeping locks should not be
      11             :  * called from interrupt context and we have preemption disabled while
      12             :  * spinning.
      13             :  */
      14             : static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
      15             : 
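
For orientation, a rough sketch of the types this file manipulates; the authoritative definitions live in include/linux/osq_lock.h, so the layout and comments below are paraphrased rather than exact:

    struct optimistic_spin_node {
            struct optimistic_spin_node *next, *prev;
            int locked;     /* 1 once the lock has been handed to this node */
            int cpu;        /* encoded CPU number, i.e. CPU # + 1 */
    };

    struct optimistic_spin_queue {
            /*
             * Encoded CPU number of the tail node in the queue;
             * OSQ_UNLOCKED_VAL (0) when the queue is empty.
             */
            atomic_t tail;
    };
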
      16             : /*
      17             :  * We use the value 0 to represent "no CPU", thus the encoded value
      18             :  * will be the CPU number incremented by 1.
      19             :  */
      20        2024 : static inline int encode_cpu(int cpu_nr)
      21             : {
      22        2024 :         return cpu_nr + 1;
      23             : }
      24             : 
      25        2608 : static inline int node_cpu(struct optimistic_spin_node *node)
      26             : {
      27        2608 :         return node->cpu - 1;
      28             : }
      29             : 
      30           6 : static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
      31             : {
      32           6 :         int cpu_nr = encoded_cpu_val - 1;
      33             : 
      34           6 :         return per_cpu_ptr(&osq_node, cpu_nr);
      35             : }
      36             : 
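
A minimal round-trip illustration of the encoding convention above, assuming OSQ_UNLOCKED_VAL is 0 (as the "no CPU" comment implies), so that a valid encoded CPU number can never collide with the empty-queue value:

    int curr = encode_cpu(smp_processor_id());             /* CPU 0 -> 1, CPU 1 -> 2, ... */
    struct optimistic_spin_node *node = decode_cpu(curr);  /* this CPU's per-cpu osq_node */

    /* Once osq_lock() has stored node->cpu = curr, node_cpu(node) == smp_processor_id(). */
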
      37             : /*
      38             :  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
      39             :  * Can return NULL in case we were the last queued and we updated @lock instead.
      40             :  */
      41             : static inline struct optimistic_spin_node *
      42           0 : osq_wait_next(struct optimistic_spin_queue *lock,
      43             :               struct optimistic_spin_node *node,
      44             :               struct optimistic_spin_node *prev)
      45             : {
      46           0 :         struct optimistic_spin_node *next = NULL;
      47           0 :         int curr = encode_cpu(smp_processor_id());
      48           0 :         int old;
      49             : 
      50             :         /*
      51             :          * If there is a prev node in the queue, then the 'old' value will be
      52             :          * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL, since if
      53             :          * we're currently last in the queue, the queue will become empty.
      54             :          */
      55           0 :         old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
      56             : 
      57           0 :         for (;;) {
      58           0 :                 if (atomic_read(&lock->tail) == curr &&
      59           0 :                     atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
      60             :                         /*
      61             :                          * We were the last queued, we moved @lock back. @prev
      62             :                          * will now observe @lock and will complete its
      63             :                          * unlock()/unqueue().
      64             :                          */
      65             :                         break;
      66             :                 }
      67             : 
      68             :                 /*
      69             :                  * We must xchg() the @node->next value, because if we were to
      70             :                  * leave it in, a concurrent unlock()/unqueue() from
      71             :                  * @node->next might complete Step-A and think its @prev is
      72             :                  * still valid.
      73             :                  *
      74             :                  * If the concurrent unlock()/unqueue() wins the race, we'll
      75             :                  * wait for either @lock to point to us, through its Step-B, or
      76             :                  * wait for a new @node->next from its Step-C.
      77             :                  */
      78           0 :                 if (node->next) {
      79           0 :                         next = xchg(&node->next, NULL);
      80           0 :                         if (next)
      81             :                                 break;
      82             :                 }
      83             : 
      84           0 :                 cpu_relax();
      85             :         }
      86             : 
      87           0 :         return next;
      88             : }
      89             : 
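
Restating the osq_wait_next() contract that the two callers below (osq_lock()'s unqueue path and osq_unlock()'s slow path) rely on; this is a descriptive sketch, not additional kernel code:

    next = osq_wait_next(lock, node, prev);
    if (!next) {
            /*
             * We were the queue tail: @lock->tail has been moved back to
             * @prev (or to OSQ_UNLOCKED_VAL when @prev is NULL), so there
             * is no successor to relink or hand the lock to.
             */
    } else {
            /*
             * @next was atomically detached from @node->next via xchg();
             * the caller must either splice it past @node (unqueue Step-C)
             * or grant it the lock (unlock slow path).
             */
    }
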
      90        1012 : bool osq_lock(struct optimistic_spin_queue *lock)
      91             : {
      92        1012 :         struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
      93        1012 :         struct optimistic_spin_node *prev, *next;
      94        1012 :         int curr = encode_cpu(smp_processor_id());
      95        1012 :         int old;
      96             : 
      97        1012 :         node->locked = 0;
      98        1012 :         node->next = NULL;
      99        1012 :         node->cpu = curr;
     100             : 
     101             :         /*
     102             :          * We need both ACQUIRE (pairs with corresponding RELEASE in
     103             :          * unlock() uncontended, or fastpath) and RELEASE (to publish
     104             :          * the node fields we just initialised) semantics when updating
     105             :          * the lock tail.
     106             :          */
     107        1012 :         old = atomic_xchg(&lock->tail, curr);
     108        1012 :         if (old == OSQ_UNLOCKED_VAL)
     109             :                 return true;
     110             : 
     111           6 :         prev = decode_cpu(old);
     112           6 :         node->prev = prev;
     113             : 
     114             :         /*
     115             :          * osq_lock()                   unqueue
     116             :          *
     117             :          * node->prev = prev         osq_wait_next()
     118             :          * WMB                          MB
     119             :          * prev->next = node         next->prev = prev // unqueue-C
     120             :          *
     121             :          * Here 'node->prev' and 'next->prev' are the same variable and we need
     122             :          * to ensure these stores happen in-order to avoid corrupting the list.
     123             :          */
     124           6 :         smp_wmb();
     125             : 
     126           6 :         WRITE_ONCE(prev->next, node);
     127             : 
     128             :         /*
     129             :          * Normally @prev would be untouchable after the above store, because at
     130             :          * that moment unlock() can proceed and wipe the node element from the stack.
     131             :          *
     132             :          * However, since our nodes are static per-cpu storage, we're
     133             :          * guaranteed their existence -- this allows us to apply
     134             :          * cmpxchg in an attempt to undo our queueing.
     135             :          */
     136             : 
     137             :         /*
     138             :          * Wait to acquire the lock, or for cancellation. Note that need_resched()
     139             :          * will come with an IPI, which will wake smp_cond_load_relaxed() if it
     140             :          * is implemented with a monitor-wait. vcpu_is_preempted() relies on
     141             :          * polling; be careful.
     142             :          */
     143        5222 :         if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
     144             :                                   vcpu_is_preempted(node_cpu(node->prev))))
     145             :                 return true;
     146             : 
     147             :         /* unqueue */
     148             :         /*
     149             :          * Step - A  -- stabilize @prev
     150             :          *
     151             :          * Undo our @prev->next assignment; this will make @prev's
     152             :          * unlock()/unqueue() wait for a next pointer since @lock points to us
     153             :          * (or later).
     154             :          */
     155             : 
     156           0 :         for (;;) {
     157             :                 /*
     158             :                  * cpu_relax() below implies a compiler barrier which would
     159             :                  * prevent this comparison from being optimized away.
     160             :                  */
     161           0 :                 if (data_race(prev->next) == node &&
     162           0 :                     cmpxchg(&prev->next, node, NULL) == node)
     163             :                         break;
     164             : 
     165             :                 /*
     166             :                  * We can only fail the cmpxchg() racing against an unlock(),
     167             :                  * in which case we should observe @node->locked becoming
     168             :                  * true.
     169             :                  */
     170           0 :                 if (smp_load_acquire(&node->locked))
     171             :                         return true;
     172             : 
     173           0 :                 cpu_relax();
     174             : 
     175             :                 /*
     176             :                  * Or we race against a concurrent unqueue()'s step-B, in which
     177             :                  * case its step-C will write us a new @node->prev pointer.
     178             :                  */
     179           0 :                 prev = READ_ONCE(node->prev);
     180             :         }
     181             : 
     182             :         /*
     183             :          * Step - B -- stabilize @next
     184             :          *
     185             :          * Similar to unlock(), wait for @node->next or move @lock from @node
     186             :          * back to @prev.
     187             :          */
     188             : 
     189           0 :         next = osq_wait_next(lock, node, prev);
     190           0 :         if (!next)
     191             :                 return false;
     192             : 
     193             :         /*
     194             :          * Step - C -- unlink
     195             :          *
     196             :          * @prev is stable because it's still waiting for a new @prev->next
     197             :          * pointer, @next is stable because our @node->next pointer is NULL and
     198             :          * it will wait in Step-A.
     199             :          */
     200             : 
     201           0 :         WRITE_ONCE(next->prev, prev);
     202           0 :         WRITE_ONCE(prev->next, next);
     203             : 
     204           0 :         return false;
     205             : }
     206             : 
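
Summarizing the unqueue path above as a schematic of its three steps, for a queue ordered @prev -> @node -> @next (comments only, mirroring the code in osq_lock()):

    /*
     * Step-A: cmpxchg(&prev->next, node, NULL)
     *         Detach @node from @prev. If the cmpxchg() fails because an
     *         unlock() raced us, @node->locked becomes true and osq_lock()
     *         returns true; if a concurrent unqueue rewrote @node->prev
     *         instead, reload @prev and retry.
     * Step-B: osq_wait_next(lock, node, prev)
     *         Either move @lock->tail back to @prev (we were last in the
     *         queue; osq_lock() returns false) or obtain a stable @next
     *         detached from @node->next.
     * Step-C: WRITE_ONCE(next->prev, prev); WRITE_ONCE(prev->next, next)
     *         Splice @prev and @next together, removing @node, and return
     *         false.
     */
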
     207        1012 : void osq_unlock(struct optimistic_spin_queue *lock)
     208             : {
     209        1012 :         struct optimistic_spin_node *node, *next;
     210        1012 :         int curr = encode_cpu(smp_processor_id());
     211             : 
     212             :         /*
     213             :          * Fast path for the uncontended case.
     214             :          */
     215        2024 :         if (likely(atomic_cmpxchg_release(&lock->tail, curr,
     216             :                                           OSQ_UNLOCKED_VAL) == curr))
     217             :                 return;
     218             : 
     219             :         /*
     220             :          * Second most likely case.
     221             :          */
     222           6 :         node = this_cpu_ptr(&osq_node);
     223           6 :         next = xchg(&node->next, NULL);
     224           6 :         if (next) {
     225           6 :                 WRITE_ONCE(next->locked, 1);
     226           6 :                 return;
     227             :         }
     228             : 
     229           0 :         next = osq_wait_next(lock, node, NULL);
     230           0 :         if (next)
     231           0 :                 WRITE_ONCE(next->locked, 1);
     232             : }
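
Putting the two entry points together, a simplified, hypothetical sketch of how a sleeping-lock slowpath might use this API; owner_is_running() and try_fast_acquire() are made-up placeholders, and real callers (the mutex and rwsem optimistic-spin paths) handle more cases:

    static bool try_optimistic_spin(struct optimistic_spin_queue *osq)
    {
            bool acquired = false;

            preempt_disable();              /* spinning requires preemption off */
            if (!osq_lock(osq))             /* false: bail out; osq_lock() already unqueued us */
                    goto out;

            while (owner_is_running()) {    /* placeholder: spin while the lock owner makes progress */
                    if (try_fast_acquire()) {       /* placeholder: attempt to take the real lock */
                            acquired = true;
                            break;
                    }
                    cpu_relax();
            }

            osq_unlock(osq);                /* only unlock the OSQ if osq_lock() succeeded */
    out:
            preempt_enable();
            return acquired;
    }
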

Generated by: LCOV version 1.14